/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
                   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
        BCM57712 = 3,
        BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" },
        { "Broadcom NetXtreme II BCM57712 XGb" },
        { "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E        0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

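/* Write a 64-bit DMA address into storm internal memory as two
 * consecutive 32-bit registers (low dword first).
 */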
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp,  addr, U64_LO(mapping));
        REG_WR(bp,  addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
                                       u32 addr, size_t size, u32 val)
{
        int i;
        for (i = 0; i < size/4; i++)
                REG_WR(bp,  addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct ustorm_per_client_stats);

        u32 addr = BAR_USTRORM_INTMEM +
                        USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct tstorm_per_client_stats);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct xstorm_per_client_stats);

        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
                                         dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
        REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
                                struct tstorm_eth_function_common_config *tcfg,
                                u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_function_common_config);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_XSTRORM_INTMEM +
                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_TSTRORM_INTMEM +
                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_USTRORM_INTMEM +
                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                        u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
                                struct event_ring_data *eq_data,
                                u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                        u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
        REG_WR16(bp, addr, eq_prod);
}

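/* Set the host-coalescing timeout of a single status block index in
 * storm memory; the index_data offset differs between E1x and E2.
 */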
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 ticks)
{

        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, timeout);
        REG_WR8(bp, addr, ticks);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
                          port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 disable)
{
        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, flags);
        u16 flags = REG_RD16(bp, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= enable_flag;
        REG_WR16(bp, addr, flags);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
                          port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

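/* Pretty-print a DMAE command, decoding the source and destination
 * types (PCI vs. GRC) from the opcode.
 */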
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                          int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

}

const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                           DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

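/* Build a DMAE opcode from the source/destination types, port/VN and
 * endianity, optionally requesting a completion of the given type.
 */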
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                             bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}

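/* Prepare a DMAE command that signals its completion by writing
 * DMAE_COMP_VAL to the slowpath wb_comp word.
 */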
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                                      struct dmae_command *dmae,
                                      u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
                                      struct dmae_command *dmae)
{
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
        int rc = 0;

        DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        /* lock the dmae channel */
        spin_lock_bh(&bp->dmae_lock);

        /* reset completion */
        *wb_comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
        while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*wb_comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

        DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
        spin_unlock_bh(&bp->dmae_lock);
        return rc;
}

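/* DMA len32 dwords from host memory (dma_addr) to a GRC address,
 * falling back to indirect register writes while DMAE is not ready.
 */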
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}

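/* DMA len32 dwords from a GRC address into the slowpath wb_data
 * buffer, falling back to indirect register reads while DMAE is not
 * ready.
 */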
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}

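/* Write an arbitrarily long buffer by splitting it into DMAE
 * transactions of at most DMAE_LEN32_WR_MAX dwords each.
 */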
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                                      u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

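/* Scan the assert lists of all four storm processors and print any
 * recorded asserts; returns the number of asserts found.
 */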
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

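/* Dump the MCP firmware trace buffer from scratchpad memory to the
 * kernel log.
 */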
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
        addr = trace_shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
                        + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

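/* Dump driver and firmware state (indices, status blocks and, under
 * BNX2X_STOP_ON_ERROR, the Rx/Tx rings) for post-mortem analysis.
 */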
void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
#endif

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_idx, bp->def_att_idx,
                  bp->attn_state, bp->spq_prod_idx);
        BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                  bp->def_status_blk->atten_status_block.attn_bits,
                  bp->def_status_blk->atten_status_block.attn_bits_ack,
                  bp->def_status_blk->atten_status_block.status_block_id,
                  bp->def_status_blk->atten_status_block.attn_bits_index);
        BNX2X_ERR("     def (");
        for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                pr_cont("0x%x%s",
                       bp->def_status_blk->sp_sb.index_values[i],
                       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

        for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
                *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
                        i*sizeof(u32));

        pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
                         "pf_id(0x%x)  vnic_id(0x%x)  "
                         "vf_id(0x%x)  vf_valid (0x%x)\n",
               sp_sb_data.igu_sb_id,
               sp_sb_data.igu_seg_id,
               sp_sb_data.p_func.pf_id,
               sp_sb_data.p_func.vnic_id,
               sp_sb_data.p_func.vf_id,
               sp_sb_data.p_func.vf_valid);


        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm  *hc_sm_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.common.state_machine :
                        sb_data_e1x.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.index_data :
                        sb_data_e1x.index_data;
                int data_size;
                u32 *sb_data_p;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

                loop = CHIP_IS_E2(bp) ?
                        HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

                /* host sb data */

#ifdef BCM_CNIC
                if (IS_FCOE_FP(fp))
                        continue;
#endif
                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");
                /* fw sb data */
                data_size = CHIP_IS_E2(bp) ?
                        sizeof(struct hc_status_block_data_e2) :
                        sizeof(struct hc_status_block_data_e1x);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E2(bp) ?
                        (u32 *)&sb_data_e2 :
                        (u32 *)&sb_data_e1x;
                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (CHIP_IS_E2(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) "
                               "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
                               "time_to_expire (0x%x) "
                               "timer_value(0x%x)\n", j,
                               hc_sm_p[j].__flags,
                               hc_sm_p[j].igu_sb_id,
                               hc_sm_p[j].igu_seg_id,
                               hc_sm_p[j].time_to_expire,
                               hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) "
                                         "timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }
#endif
        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

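/* Enable interrupts through the HC block, programming it for MSI-X,
 * MSI or INTx according to the flags the driver negotiated.
 */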
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                if (!CHIP_IS_E1(bp)) {
                        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                           val, port, addr);

                        REG_WR(bp, addr, val);

                        val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
                }
        }

        if (CHIP_IS_E1(bp))
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (!CHIP_IS_E1(bp)) {
                /* init leading/trailing edge */
                if (IS_MF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

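/* Enable interrupts through the IGU block, mirroring
 * bnx2x_hc_int_enable() for chips whose interrupt block is not the HC.
 */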
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
        u32 val;
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        if (msix) {
                val &= ~(IGU_PF_CONF_INT_LINE_EN |
                         IGU_PF_CONF_SINGLE_ISR_EN);
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        } else {
                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_INT_LINE_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        }

        DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

        barrier();

        /* init leading/trailing edge */
        if (IS_MF(bp)) {
                val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                if (bp->port.pmf)
                        /* enable nig and gpio3 attention */
                        val |= 0x1100;
        } else
                val = 0xffff;

        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_enable(bp);
        else
                bnx2x_igu_int_enable(bp);
}

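/* Disable interrupts at the HC block and verify the write reached
 * the hardware.
 */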
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        /*
         * In E1 we must use only the PCI configuration space to disable the
         * MSI/MSI-X capability.
         * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
                 * use the mask register to prevent the HC from sending
                 * interrupts after we exit this function.
                 */
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from HC!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

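/* Disable interrupt handling and, optionally, the interrupt hardware
 * itself, then wait for all ISRs and the slowpath task to finish.
 */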
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_eth_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was successfully acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

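/* Handle a slowpath ramrod completion for a fastpath queue by
 * advancing fp->state according to the completed command.
 */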
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        switch (command | fp->state) {
        case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
                DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
                fp->state = BNX2X_FP_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1388                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1389                 fp->state = BNX2X_FP_STATE_TERMINATED;
1390                 break;
1391
1392         default:
1393                 BNX2X_ERR("unexpected MC reply (%d)  "
1394                           "fp[%d] state is %x\n",
1395                           command, fp->index, fp->state);
1396                 break;
1397         }
1398
1399         smp_mb__before_atomic_inc();
1400         atomic_inc(&bp->cq_spq_left);
1401         /* push the change in fp->state towards memory */
1402         smp_wmb();
1405 }
1406
1407 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1408 {
1409         struct bnx2x *bp = netdev_priv(dev_instance);
1410         u16 status = bnx2x_ack_int(bp);
1411         u16 mask;
1412         int i;
1413
1414         /* Return here if interrupt is shared and it's not for us */
1415         if (unlikely(status == 0)) {
1416                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1417                 return IRQ_NONE;
1418         }
1419         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1420
1421         /* Return here if interrupt is disabled */
1422         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1423                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1424                 return IRQ_HANDLED;
1425         }
1426
1427 #ifdef BNX2X_STOP_ON_ERROR
1428         if (unlikely(bp->panic))
1429                 return IRQ_HANDLED;
1430 #endif
1431
1432         for_each_eth_queue(bp, i) {
1433                 struct bnx2x_fastpath *fp = &bp->fp[i];
1434
1435                 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1436                 if (status & mask) {
1437                         /* Handle Rx and Tx according to SB id */
1438                         prefetch(fp->rx_cons_sb);
1439                         prefetch(fp->tx_cons_sb);
1440                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1441                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1442                         status &= ~mask;
1443                 }
1444         }
1445
1446 #ifdef BCM_CNIC
1447         mask = 0x2;
1448         if (status & (mask | 0x1)) {
1449                 struct cnic_ops *c_ops = NULL;
1450
1451                 rcu_read_lock();
1452                 c_ops = rcu_dereference(bp->cnic_ops);
1453                 if (c_ops)
1454                         c_ops->cnic_handler(bp->cnic_data, NULL);
1455                 rcu_read_unlock();
1456
1457                 status &= ~mask;
1458         }
1459 #endif
1460
1461         if (unlikely(status & 0x1)) {
1462                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1463
1464                 status &= ~0x1;
1465                 if (!status)
1466                         return IRQ_HANDLED;
1467         }
1468
1469         if (unlikely(status))
1470                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1471                    status);
1472
1473         return IRQ_HANDLED;
1474 }
1475
1476 /* end of fast path */
1477
1478
1479 /* Link */
1480
1481 /*
1482  * General service functions
1483  */
1484
1485 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1486 {
1487         u32 lock_status;
1488         u32 resource_bit = (1 << resource);
1489         int func = BP_FUNC(bp);
1490         u32 hw_lock_control_reg;
1491         int cnt;
1492
1493         /* Validating that the resource is within range */
1494         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495                 DP(NETIF_MSG_HW,
1496                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498                 return -EINVAL;
1499         }
1500
1501         if (func <= 5) {
1502                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503         } else {
1504                 hw_lock_control_reg =
1505                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506         }
1507
1508         /* Validating that the resource is not already taken */
1509         lock_status = REG_RD(bp, hw_lock_control_reg);
1510         if (lock_status & resource_bit) {
1511                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1512                    lock_status, resource_bit);
1513                 return -EEXIST;
1514         }
1515
1516         /* Try for up to 5 seconds, polling every 5ms */
1517         for (cnt = 0; cnt < 1000; cnt++) {
1518                 /* Try to acquire the lock */
1519                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520                 lock_status = REG_RD(bp, hw_lock_control_reg);
1521                 if (lock_status & resource_bit)
1522                         return 0;
1523
1524                 msleep(5);
1525         }
1526         DP(NETIF_MSG_HW, "Timeout\n");
1527         return -EAGAIN;
1528 }
1529
1530 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1531 {
1532         u32 lock_status;
1533         u32 resource_bit = (1 << resource);
1534         int func = BP_FUNC(bp);
1535         u32 hw_lock_control_reg;
1536
1537         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
1539         /* Validating that the resource is within range */
1540         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541                 DP(NETIF_MSG_HW,
1542                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544                 return -EINVAL;
1545         }
1546
1547         if (func <= 5) {
1548                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549         } else {
1550                 hw_lock_control_reg =
1551                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552         }
1553
1554         /* Validating that the resource is currently taken */
1555         lock_status = REG_RD(bp, hw_lock_control_reg);
1556         if (!(lock_status & resource_bit)) {
1557                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1558                    lock_status, resource_bit);
1559                 return -EFAULT;
1560         }
1561
1562         REG_WR(bp, hw_lock_control_reg, resource_bit);
1563         return 0;
1564 }
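
/*
 * Example (illustrative pairing, mirroring bnx2x_set_gpio() below): the
 * acquire/release calls are meant to bracket accesses to HW resources
 * shared between the functions of a chip:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... read-modify-write the shared register ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * Note that the acquire side can fail with -EAGAIN after the 5 second
 * timeout; a cautious caller would check the return value first.
 */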
1565
1566
1567 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568 {
1569         /* The GPIO should be swapped if swap register is set and active */
1570         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572         int gpio_shift = gpio_num +
1573                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574         u32 gpio_mask = (1 << gpio_shift);
1575         u32 gpio_reg;
1576         int value;
1577
1578         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580                 return -EINVAL;
1581         }
1582
1583         /* read GPIO value */
1584         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586         /* get the requested pin value */
1587         if ((gpio_reg & gpio_mask) == gpio_mask)
1588                 value = 1;
1589         else
1590                 value = 0;
1591
1592         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1593
1594         return value;
1595 }
1596
1597 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1598 {
1599         /* The GPIO should be swapped if swap register is set and active */
1600         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1601                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1602         int gpio_shift = gpio_num +
1603                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1604         u32 gpio_mask = (1 << gpio_shift);
1605         u32 gpio_reg;
1606
1607         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1608                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1609                 return -EINVAL;
1610         }
1611
1612         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1613         /* read GPIO and mask except the float bits */
1614         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1615
1616         switch (mode) {
1617         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1618                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1619                    gpio_num, gpio_shift);
1620                 /* clear FLOAT and set CLR */
1621                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1622                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1623                 break;
1624
1625         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1626                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1627                    gpio_num, gpio_shift);
1628                 /* clear FLOAT and set SET */
1629                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1630                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1631                 break;
1632
1633         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1634                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1635                    gpio_num, gpio_shift);
1636                 /* set FLOAT */
1637                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1638                 break;
1639
1640         default:
1641                 break;
1642         }
1643
1644         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1645         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1646
1647         return 0;
1648 }
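
/*
 * Example (illustrative only, constants taken from the switch above):
 * drive a pin high and later float it again:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 *	...
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 */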
1649
1650 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1651 {
1652         /* The GPIO should be swapped if swap register is set and active */
1653         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1654                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1655         int gpio_shift = gpio_num +
1656                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1657         u32 gpio_mask = (1 << gpio_shift);
1658         u32 gpio_reg;
1659
1660         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1661                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1662                 return -EINVAL;
1663         }
1664
1665         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1666         /* read GPIO int */
1667         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1668
1669         switch (mode) {
1670         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1671                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1672                                    "output low\n", gpio_num, gpio_shift);
1673                 /* clear SET and set CLR */
1674                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676                 break;
1677
1678         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1679                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1680                                    "output high\n", gpio_num, gpio_shift);
1681                 /* clear CLR and set SET */
1682                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1683                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1684                 break;
1685
1686         default:
1687                 break;
1688         }
1689
1690         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1691         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1692
1693         return 0;
1694 }
1695
1696 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1697 {
1698         u32 spio_mask = (1 << spio_num);
1699         u32 spio_reg;
1700
1701         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702             (spio_num > MISC_REGISTERS_SPIO_7)) {
1703                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704                 return -EINVAL;
1705         }
1706
1707         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1708         /* read SPIO and mask except the float bits */
1709         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1710
1711         switch (mode) {
1712         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1713                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714                 /* clear FLOAT and set CLR */
1715                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717                 break;
1718
1719         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1720                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721                 /* clear FLOAT and set SET */
1722                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724                 break;
1725
1726         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728                 /* set FLOAT */
1729                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730                 break;
1731
1732         default:
1733                 break;
1734         }
1735
1736         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1737         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1738
1739         return 0;
1740 }
1741
1742 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743 {
1744         u32 sel_phy_idx = 0;
1745         if (bp->link_vars.link_up) {
1746                 sel_phy_idx = EXT_PHY1;
1747                 /* In case link is SERDES, check if EXT_PHY2 is the active one */
1748                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750                         sel_phy_idx = EXT_PHY2;
1751         } else {
1752
1753                 switch (bnx2x_phy_selection(&bp->link_params)) {
1754                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757                        sel_phy_idx = EXT_PHY1;
1758                        break;
1759                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761                        sel_phy_idx = EXT_PHY2;
1762                        break;
1763                 }
1764         }
1765         /*
1766          * The selected active PHY is always after swapping (in case PHY
1767          * swapping is enabled). So when swapping is enabled, we need to
1768          * reverse the configuration.
1769          */
1770
1771         if (bp->link_params.multi_phy_config &
1772             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773                 if (sel_phy_idx == EXT_PHY1)
1774                         sel_phy_idx = EXT_PHY2;
1775                 else if (sel_phy_idx == EXT_PHY2)
1776                         sel_phy_idx = EXT_PHY1;
1777         }
1778         return LINK_CONFIG_IDX(sel_phy_idx);
1779 }
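
/*
 * Example: if the link is down, the second PHY is the configured priority
 * PHY and PHY swapping is enabled, the code above first picks EXT_PHY2 and
 * then reverses it, so the function returns LINK_CONFIG_IDX(EXT_PHY1).
 */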
1780
1781 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1782 {
1783         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1784         switch (bp->link_vars.ieee_fc &
1785                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1786         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1787                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1788                                                    ADVERTISED_Pause);
1789                 break;
1790
1791         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1792                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1793                                                   ADVERTISED_Pause);
1794                 break;
1795
1796         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1797                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1798                 break;
1799
1800         default:
1801                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1802                                                    ADVERTISED_Pause);
1803                 break;
1804         }
1805 }
1806
1807 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1808 {
1809         if (!BP_NOMCP(bp)) {
1810                 u8 rc;
1811                 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812                 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1813                 /* Initialize link parameters structure variables */
1814                 /* It is recommended to turn off RX FC for jumbo frames
1815                    for better performance */
1816                 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1817                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1818                 else
1819                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1820
1821                 bnx2x_acquire_phy_lock(bp);
1822
1823                 if (load_mode == LOAD_DIAG) {
1824                         bp->link_params.loopback_mode = LOOPBACK_XGXS;
1825                         bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826                 }
1827
1828                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1829
1830                 bnx2x_release_phy_lock(bp);
1831
1832                 bnx2x_calc_fc_adv(bp);
1833
1834                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1836                         bnx2x_link_report(bp);
1837                 }
1838                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1839                 return rc;
1840         }
1841         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
1842         return -EINVAL;
1843 }
1844
1845 void bnx2x_link_set(struct bnx2x *bp)
1846 {
1847         if (!BP_NOMCP(bp)) {
1848                 bnx2x_acquire_phy_lock(bp);
1849                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1850                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1851                 bnx2x_release_phy_lock(bp);
1852
1853                 bnx2x_calc_fc_adv(bp);
1854         } else
1855                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
1856 }
1857
1858 static void bnx2x__link_reset(struct bnx2x *bp)
1859 {
1860         if (!BP_NOMCP(bp)) {
1861                 bnx2x_acquire_phy_lock(bp);
1862                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1863                 bnx2x_release_phy_lock(bp);
1864         } else
1865                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
1866 }
1867
1868 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1869 {
1870         u8 rc = 0;
1871
1872         if (!BP_NOMCP(bp)) {
1873                 bnx2x_acquire_phy_lock(bp);
1874                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875                                      is_serdes);
1876                 bnx2x_release_phy_lock(bp);
1877         } else
1878                 BNX2X_ERR("Bootcode is missing - cannot test link\n");
1879
1880         return rc;
1881 }
1882
1883 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1884 {
1885         u32 r_param = bp->link_vars.line_speed / 8;
1886         u32 fair_periodic_timeout_usec;
1887         u32 t_fair;
1888
1889         memset(&(bp->cmng.rs_vars), 0,
1890                sizeof(struct rate_shaping_vars_per_port));
1891         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1892
1893         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1895
1896         /* this is the threshold below which no timer arming will occur;
1897            the 1.25 coefficient makes the threshold a little bigger than
1898            the real time, to compensate for timer inaccuracy */
1899         bp->cmng.rs_vars.rs_threshold =
1900                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1901
1902         /* resolution of fairness timer */
1903         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904         /* for 10G it is 1000usec. for 1G it is 10000usec. */
1905         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1906
1907         /* this is the threshold below which we won't arm the timer anymore */
1908         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1909
1910         /* we multiply by 1e3/8 to get bytes/msec.
1911            We don't want the credits to exceed a credit of
1912            t_fair*FAIR_MEM (the algorithm resolution) */
1913         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914         /* since each tick is 4 usec */
1915         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1916 }
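
/*
 * Worked example (assuming RS_PERIODIC_TIMEOUT_USEC is 100, as the
 * "100 usec" comment above implies): at a 10G line speed
 *	r_param      = 10000 / 8 = 1250 bytes/usec
 *	rs_threshold = 100 * 1250 * 5 / 4 = 156250 bytes
 * and, per the comment above, t_fair comes out at 1000 usec.
 */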
1917
1918 /* Calculates the sum of vn_min_rates.
1919    It's needed for further normalizing of the min_rates.
1920    Returns:
1921      sum of vn_min_rates.
1922        or
1923      0 - if all the min_rates are 0.
1924      In the latter case the fairness algorithm should be deactivated.
1925      If not all min_rates are zero then those that are zero will be set to 1.
1926  */
1927 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928 {
1929         int all_zero = 1;
1930         int vn;
1931
1932         bp->vn_weight_sum = 0;
1933         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1934                 u32 vn_cfg = bp->mf_config[vn];
1935                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938                 /* Skip hidden vns */
1939                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940                         continue;
1941
1942                 /* If min rate is zero - set it to 1 */
1943                 if (!vn_min_rate)
1944                         vn_min_rate = DEF_MIN_RATE;
1945                 else
1946                         all_zero = 0;
1947
1948                 bp->vn_weight_sum += vn_min_rate;
1949         }
1950
1951         /* ... only if all min rates are zero - disable fairness */
1952         if (all_zero) {
1953                 bp->cmng.flags.cmng_enables &=
1954                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1955                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1956                    "  fairness will be disabled\n");
1957         } else
1958                 bp->cmng.flags.cmng_enables |=
1959                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1960 }
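
/*
 * Worked example: with four vns whose MIN_BW fields are {25, 25, 0, 0}
 * and none hidden, the first two contribute 2500 each (field * 100), the
 * zero entries are raised to DEF_MIN_RATE, fairness stays enabled and
 * vn_weight_sum = 5000 + 2 * DEF_MIN_RATE.
 */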
1961
1962 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1963 {
1964         struct rate_shaping_vars_per_vn m_rs_vn;
1965         struct fairness_vars_per_vn m_fair_vn;
1966         u32 vn_cfg = bp->mf_config[vn];
1967         int func = 2*vn + BP_PORT(bp);
1968         u16 vn_min_rate, vn_max_rate;
1969         int i;
1970
1971         /* If function is hidden - set min and max to zeroes */
1972         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973                 vn_min_rate = 0;
1974                 vn_max_rate = 0;
1975
1976         } else {
1977                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1978                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1979                 /* If min rate is zero - set it to 1 */
1980                 if (bp->vn_weight_sum && (vn_min_rate == 0))
1981                         vn_min_rate = DEF_MIN_RATE;
1982                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1983                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1984         }
1985
1986         DP(NETIF_MSG_IFUP,
1987            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1988            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1989
1990         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1991         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1992
1993         /* global vn counter - maximal Mbps for this vn */
1994         m_rs_vn.vn_counter.rate = vn_max_rate;
1995
1996         /* quota - number of bytes transmitted in this period */
1997         m_rs_vn.vn_counter.quota =
1998                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1999
2000         if (bp->vn_weight_sum) {
2001                 /* credit for each period of the fairness algorithm:
2002                    number of bytes in T_FAIR (the vn share the port rate).
2003                    vn_weight_sum should not be larger than 10000, thus
2004                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2005                    than zero */
2006                 m_fair_vn.vn_credit_delta =
2007                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2008                                                    (8 * bp->vn_weight_sum))),
2009                               (bp->cmng.fair_vars.fair_threshold * 2));
2010                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2011                    m_fair_vn.vn_credit_delta);
2012         }
2013
2014         /* Store it to internal memory */
2015         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2016                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2017                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2018                        ((u32 *)(&m_rs_vn))[i]);
2019
2020         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2021                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2022                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2023                        ((u32 *)(&m_fair_vn))[i]);
2024 }
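
/*
 * Worked example (again assuming a 100 usec rate-shaping period): a vn
 * with a MAX_BW field of 25 gets vn_max_rate = 2500 Mbps, i.e. 2500
 * bits/usec, so its quota per period is 2500 * 100 / 8 = 31250 bytes.
 */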
2025
2026 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2027 {
2028         if (CHIP_REV_IS_SLOW(bp))
2029                 return CMNG_FNS_NONE;
2030         if (IS_MF(bp))
2031                 return CMNG_FNS_MINMAX;
2032
2033         return CMNG_FNS_NONE;
2034 }
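
/*
 * In other words: min-max congestion management is applied only in
 * multi-function mode, and never on slow (emulation) chip revisions.
 */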
2035
2036 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2037 {
2038         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2039
2040         if (BP_NOMCP(bp))
2041                 return; /* what should be the default value in this case? */
2042
2043         /* For 2 port configuration the absolute function number formula
2044          * is:
2045          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2046          *
2047          *      and there are 4 functions per port
2048          *
2049          * For 4 port configuration it is
2050          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2051          *
2052          *      and there are 2 functions per port
2053          */
2054         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2055                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2056
2057                 if (func >= E1H_FUNC_MAX)
2058                         break;
2059
2060                 bp->mf_config[vn] =
2061                         MF_CFG_RD(bp, func_mf_config[func].config);
2062         }
2063 }
2064
2065 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2066 {
2067
2068         if (cmng_type == CMNG_FNS_MINMAX) {
2069                 int vn;
2070
2071                 /* clear cmng_enables */
2072                 bp->cmng.flags.cmng_enables = 0;
2073
2074                 /* read mf conf from shmem */
2075                 if (read_cfg)
2076                         bnx2x_read_mf_cfg(bp);
2077
2078                 /* Init rate shaping and fairness contexts */
2079                 bnx2x_init_port_minmax(bp);
2080
2081                 /* vn_weight_sum and enable fairness if not 0 */
2082                 bnx2x_calc_vn_weight_sum(bp);
2083
2084                 /* calculate and set min-max rate for each vn */
2085                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2086                         bnx2x_init_vn_minmax(bp, vn);
2087
2088                 /* always enable rate shaping and fairness */
2089                 bp->cmng.flags.cmng_enables |=
2090                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2091                 if (!bp->vn_weight_sum)
2092                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2093                                    "  fairness will be disabled\n");
2094                 return;
2095         }
2096
2097         /* rate shaping and fairness are disabled */
2098         DP(NETIF_MSG_IFUP,
2099            "rate shaping and fairness are disabled\n");
2100 }
2101
2102 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2103 {
2104         int port = BP_PORT(bp);
2105         int func;
2106         int vn;
2107
2108         /* Set the attention towards other drivers on the same port */
2109         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2110                 if (vn == BP_E1HVN(bp))
2111                         continue;
2112
2113                 func = ((vn << 1) | port);
2114                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2115                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2116         }
2117 }
2118
2119 /* This function is called upon link interrupt */
2120 static void bnx2x_link_attn(struct bnx2x *bp)
2121 {
2122         u32 prev_link_status = bp->link_vars.link_status;
2123         /* Make sure that we are synced with the current statistics */
2124         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2125
2126         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2127
2128         if (bp->link_vars.link_up) {
2129
2130                 /* dropless flow control */
2131                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2132                         int port = BP_PORT(bp);
2133                         u32 pause_enabled = 0;
2134
2135                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2136                                 pause_enabled = 1;
2137
2138                         REG_WR(bp, BAR_USTRORM_INTMEM +
2139                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2140                                pause_enabled);
2141                 }
2142
2143                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2144                         struct host_port_stats *pstats;
2145
2146                         pstats = bnx2x_sp(bp, port_stats);
2147                         /* reset old bmac stats */
2148                         memset(&(pstats->mac_stx[0]), 0,
2149                                sizeof(struct mac_stx));
2150                 }
2151                 if (bp->state == BNX2X_STATE_OPEN)
2152                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2153         }
2154
2155         /* indicate link status only if link status actually changed */
2156         if (prev_link_status != bp->link_vars.link_status)
2157                 bnx2x_link_report(bp);
2158
2159         if (IS_MF(bp))
2160                 bnx2x_link_sync_notify(bp);
2161
2162         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2163                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2164
2165                 if (cmng_fns != CMNG_FNS_NONE) {
2166                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2167                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2168                 } else
2169                         /* rate shaping and fairness are disabled */
2170                         DP(NETIF_MSG_IFUP,
2171                            "single function mode without fairness\n");
2172         }
2173 }
2174
2175 void bnx2x__link_status_update(struct bnx2x *bp)
2176 {
2177         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2178                 return;
2179
2180         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2181
2182         if (bp->link_vars.link_up)
2183                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2184         else
2185                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2186
2187         /* the link status update could be the result of a DCC event,
2188            hence re-read the shmem mf configuration */
2189         bnx2x_read_mf_cfg(bp);
2190
2191         /* indicate link status */
2192         bnx2x_link_report(bp);
2193 }
2194
2195 static void bnx2x_pmf_update(struct bnx2x *bp)
2196 {
2197         int port = BP_PORT(bp);
2198         u32 val;
2199
2200         bp->port.pmf = 1;
2201         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2202
2203         /* enable nig attention */
2204         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2205         if (bp->common.int_block == INT_BLOCK_HC) {
2206                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2207                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2208         } else if (CHIP_IS_E2(bp)) {
2209                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2210                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2211         }
2212
2213         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2214 }
2215
2216 /* end of Link */
2217
2218 /* slow path */
2219
2220 /*
2221  * General service functions
2222  */
2223
2224 /* send the MCP a request, block until there is a reply */
2225 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2226 {
2227         int mb_idx = BP_FW_MB_IDX(bp);
2228         u32 seq = ++bp->fw_seq;
2229         u32 rc = 0;
2230         u32 cnt = 1;
2231         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2232
2233         mutex_lock(&bp->fw_mb_mutex);
2234         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2235         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2236
2237         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2238
2239         do {
2240                 /* let the FW do its magic ... */
2241                 msleep(delay);
2242
2243                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2244
2245                 /* Give the FW up to 5 seconds (500*10ms) */
2246         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2247
2248         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2249            cnt*delay, rc, seq);
2250
2251         /* is this a reply to our command? */
2252         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2253                 rc &= FW_MSG_CODE_MASK;
2254         else {
2255                 /* FW BUG! */
2256                 BNX2X_ERR("FW failed to respond!\n");
2257                 bnx2x_fw_dump(bp);
2258                 rc = 0;
2259         }
2260         mutex_unlock(&bp->fw_mb_mutex);
2261
2262         return rc;
2263 }
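
/*
 * Example (usage sketch, mirroring bnx2x_dcc_event() below): callers pass
 * a DRV_MSG_CODE_* command and treat a zero return as "no valid reply":
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *	if (!rc)
 *		... the MCP failed to respond; a FW dump was taken ...
 */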
2264
2265 static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2266 {
2267 #ifdef BCM_CNIC
2268         if (IS_FCOE_FP(fp) && IS_MF(bp))
2269                 return false;
2270 #endif
2271         return true;
2272 }
2273
2274 /* must be called under rtnl_lock */
2275 static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2276 {
2277         u32 mask = (1 << cl_id);
2278
2279         /* initial setting is BNX2X_ACCEPT_NONE */
2280         u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2281         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2282         u8 unmatched_unicast = 0;
2283
2284         if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2285                 unmatched_unicast = 1;
2286
2287         if (filters & BNX2X_PROMISCUOUS_MODE) {
2288                 /* promiscuous - accept all, drop none */
2289                 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2290                 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2291                 if (IS_MF_SI(bp)) {
2292                         /*
2293                          * In SI mode, promiscuous mode accepts
2294                          * only unmatched packets
2295                          */
2296                         unmatched_unicast = 1;
2297                         accp_all_ucast = 0;
2298                 }
2299         }
2300         if (filters & BNX2X_ACCEPT_UNICAST) {
2301                 /* accept matched ucast */
2302                 drop_all_ucast = 0;
2303         }
2304         if (filters & BNX2X_ACCEPT_MULTICAST)
2305                 /* accept matched mcast */
2306                 drop_all_mcast = 0;
2307
2308         if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2309                 /* accept all ucast */
2310                 drop_all_ucast = 0;
2311                 accp_all_ucast = 1;
2312         }
2313         if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2314                 /* accept all mcast */
2315                 drop_all_mcast = 0;
2316                 accp_all_mcast = 1;
2317         }
2318         if (filters & BNX2X_ACCEPT_BROADCAST) {
2319                 /* accept (all) bcast */
2320                 drop_all_bcast = 0;
2321                 accp_all_bcast = 1;
2322         }
2323
2324         bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2325                 bp->mac_filters.ucast_drop_all | mask :
2326                 bp->mac_filters.ucast_drop_all & ~mask;
2327
2328         bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2329                 bp->mac_filters.mcast_drop_all | mask :
2330                 bp->mac_filters.mcast_drop_all & ~mask;
2331
2332         bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2333                 bp->mac_filters.bcast_drop_all | mask :
2334                 bp->mac_filters.bcast_drop_all & ~mask;
2335
2336         bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2337                 bp->mac_filters.ucast_accept_all | mask :
2338                 bp->mac_filters.ucast_accept_all & ~mask;
2339
2340         bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2341                 bp->mac_filters.mcast_accept_all | mask :
2342                 bp->mac_filters.mcast_accept_all & ~mask;
2343
2344         bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2345                 bp->mac_filters.bcast_accept_all | mask :
2346                 bp->mac_filters.bcast_accept_all & ~mask;
2347
2348         bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2349                 bp->mac_filters.unmatched_unicast | mask :
2350                 bp->mac_filters.unmatched_unicast & ~mask;
2351 }
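
/*
 * Example (illustrative composition only, flag names taken from the tests
 * above): a "normal" Rx mode for a client could be requested with
 *
 *	bnx2x_rxq_set_mac_filters(bp, cl_id,
 *				  BNX2X_ACCEPT_UNICAST |
 *				  BNX2X_ACCEPT_MULTICAST |
 *				  BNX2X_ACCEPT_BROADCAST);
 *
 * remembering that, per the comment above, rtnl_lock must be held.
 */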
2352
2353 static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2354 {
2355         struct tstorm_eth_function_common_config tcfg = {0};
2356         u16 rss_flgs;
2357
2358         /* tpa */
2359         if (p->func_flgs & FUNC_FLG_TPA)
2360                 tcfg.config_flags |=
2361                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2362
2363         /* set rss flags */
2364         rss_flgs = (p->rss->mode <<
2365                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2366
2367         if (p->rss->cap & RSS_IPV4_CAP)
2368                 rss_flgs |= RSS_IPV4_CAP_MASK;
2369         if (p->rss->cap & RSS_IPV4_TCP_CAP)
2370                 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2371         if (p->rss->cap & RSS_IPV6_CAP)
2372                 rss_flgs |= RSS_IPV6_CAP_MASK;
2373         if (p->rss->cap & RSS_IPV6_TCP_CAP)
2374                 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2375
2376         tcfg.config_flags |= rss_flgs;
2377         tcfg.rss_result_mask = p->rss->result_mask;
2378
2379         storm_memset_func_cfg(bp, &tcfg, p->func_id);
2380
2381         /* Enable the function in the FW */
2382         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2383         storm_memset_func_en(bp, p->func_id, 1);
2384
2385         /* statistics */
2386         if (p->func_flgs & FUNC_FLG_STATS) {
2387                 struct stats_indication_flags stats_flags = {0};
2388                 stats_flags.collect_eth = 1;
2389
2390                 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2391                 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2392
2393                 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2394                 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2395
2396                 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2397                 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2398
2399                 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2400                 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2401         }
2402
2403         /* spq */
2404         if (p->func_flgs & FUNC_FLG_SPQ) {
2405                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2406                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2407                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2408         }
2409 }
2410
2411 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2412                                      struct bnx2x_fastpath *fp)
2413 {
2414         u16 flags = 0;
2415
2416         /* calculate queue flags */
2417         flags |= QUEUE_FLG_CACHE_ALIGN;
2418         flags |= QUEUE_FLG_HC;
2419         flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2420
2421         flags |= QUEUE_FLG_VLAN;
2422         DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2423
2424         if (!fp->disable_tpa)
2425                 flags |= QUEUE_FLG_TPA;
2426
2427         flags = stat_counter_valid(bp, fp) ?
2428                         (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2429
2430         return flags;
2431 }
2432
2433 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2434         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2435         struct bnx2x_rxq_init_params *rxq_init)
2436 {
2437         u16 max_sge = 0;
2438         u16 sge_sz = 0;
2439         u16 tpa_agg_size = 0;
2440
2441         /* calculate queue flags */
2442         u16 flags = bnx2x_get_cl_flags(bp, fp);
2443
2444         if (!fp->disable_tpa) {
2445                 pause->sge_th_hi = 250;
2446                 pause->sge_th_lo = 150;
2447                 tpa_agg_size = min_t(u32,
2448                         (min_t(u32, 8, MAX_SKB_FRAGS) *
2449                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2450                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2451                         SGE_PAGE_SHIFT;
2452                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2453                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2454                 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2455                                     0xffff);
2456         }
2457
2458         /* pause - not for e1 */
2459         if (!CHIP_IS_E1(bp)) {
2460                 pause->bd_th_hi = 350;
2461                 pause->bd_th_lo = 250;
2462                 pause->rcq_th_hi = 350;
2463                 pause->rcq_th_lo = 250;
2464                 pause->sge_th_hi = 0;
2465                 pause->sge_th_lo = 0;
2466                 pause->pri_map = 1;
2467         }
2468
2469         /* rxq setup */
2470         rxq_init->flags = flags;
2471         rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2472         rxq_init->dscr_map = fp->rx_desc_mapping;
2473         rxq_init->sge_map = fp->rx_sge_mapping;
2474         rxq_init->rcq_map = fp->rx_comp_mapping;
2475         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2476
2477         /* Always use mini-jumbo MTU for FCoE L2 ring */
2478         if (IS_FCOE_FP(fp))
2479                 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2480         else
2481                 rxq_init->mtu = bp->dev->mtu;
2482
2483         rxq_init->buf_sz = fp->rx_buf_size;
2484         rxq_init->cl_qzone_id = fp->cl_qzone_id;
2485         rxq_init->cl_id = fp->cl_id;
2486         rxq_init->spcl_id = fp->cl_id;
2487         rxq_init->stat_id = fp->cl_id;
2488         rxq_init->tpa_agg_sz = tpa_agg_size;
2489         rxq_init->sge_buf_sz = sge_sz;
2490         rxq_init->max_sges_pkt = max_sge;
2491         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2492         rxq_init->fw_sb_id = fp->fw_sb_id;
2493
2494         if (IS_FCOE_FP(fp))
2495                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2496         else
2497                 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2498
2499         rxq_init->cid = HW_CID(bp, fp->cid);
2500
2501         rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2502 }
2503
2504 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2505         struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2506 {
2507         u16 flags = bnx2x_get_cl_flags(bp, fp);
2508
2509         txq_init->flags = flags;
2510         txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2511         txq_init->dscr_map = fp->tx_desc_mapping;
2512         txq_init->stat_id = fp->cl_id;
2513         txq_init->cid = HW_CID(bp, fp->cid);
2514         txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2515         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2516         txq_init->fw_sb_id = fp->fw_sb_id;
2517
2518         if (IS_FCOE_FP(fp)) {
2519                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2520                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2521         }
2522
2523         txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2524 }
2525
2526 static void bnx2x_pf_init(struct bnx2x *bp)
2527 {
2528         struct bnx2x_func_init_params func_init = {0};
2529         struct bnx2x_rss_params rss = {0};
2530         struct event_ring_data eq_data = { {0} };
2531         u16 flags;
2532
2533         /* pf specific setups */
2534         if (!CHIP_IS_E1(bp))
2535                 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2536
2537         if (CHIP_IS_E2(bp)) {
2538                 /* reset IGU PF statistics: MSIX + ATTN */
2539                 /* PF */
2540                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2541                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2542                            (CHIP_MODE_IS_4_PORT(bp) ?
2543                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2544                 /* ATTN */
2545                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2546                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2547                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2548                            (CHIP_MODE_IS_4_PORT(bp) ?
2549                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2550         }
2551
2552         /* function setup flags */
2553         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2554
2555         if (CHIP_IS_E1x(bp))
2556                 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2557         else
2558                 flags |= FUNC_FLG_TPA;
2559
2560         /* function setup */
2561
2562         /*
2563          * Although RSS is meaningless when there is a single HW queue, we
2564          * still need it enabled in order to have the HW Rx hash generated.
2565          */
2566         rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2567                    RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2568         rss.mode = bp->multi_mode;
2569         rss.result_mask = MULTI_MASK;
2570         func_init.rss = &rss;
2571
2572         func_init.func_flgs = flags;
2573         func_init.pf_id = BP_FUNC(bp);
2574         func_init.func_id = BP_FUNC(bp);
2575         func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2576         func_init.spq_map = bp->spq_mapping;
2577         func_init.spq_prod = bp->spq_prod_idx;
2578
2579         bnx2x_func_init(bp, &func_init);
2580
2581         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2582
2583         /*
2584          * Congestion management values depend on the link rate.
2585          * There is no active link yet, so the initial link rate is set to
2586          * 10 Gbps. When the link comes up, the congestion management
2587          * values are re-calculated according to the actual link rate.
2588          */
2589         bp->link_vars.line_speed = SPEED_10000;
2590         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2591
2592         /* Only the PMF sets the HW */
2593         if (bp->port.pmf)
2594                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2595
2596         /* no rx until link is up */
2597         bp->rx_mode = BNX2X_RX_MODE_NONE;
2598         bnx2x_set_storm_rx_mode(bp);
2599
2600         /* init Event Queue */
2601         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2602         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2603         eq_data.producer = bp->eq_prod;
2604         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2605         eq_data.sb_id = DEF_SB_ID;
2606         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2607 }
2608
2609
2610 static void bnx2x_e1h_disable(struct bnx2x *bp)
2611 {
2612         int port = BP_PORT(bp);
2613
2614         netif_tx_disable(bp->dev);
2615
2616         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2617
2618         netif_carrier_off(bp->dev);
2619 }
2620
2621 static void bnx2x_e1h_enable(struct bnx2x *bp)
2622 {
2623         int port = BP_PORT(bp);
2624
2625         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2626
2627         /* Tx queues should only be re-enabled */
2628         netif_tx_wake_all_queues(bp->dev);
2629
2630         /*
2631          * Should not call netif_carrier_on since it will be called if the link
2632          * is up when checking for link state
2633          */
2634 }
2635
2636 /* called due to MCP event (on pmf):
2637  *      reread new bandwidth configuration
2638  *      configure FW
2639  *      notify others function about the change
2640  */
2641 static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2642 {
2643         if (bp->link_vars.link_up) {
2644                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2645                 bnx2x_link_sync_notify(bp);
2646         }
2647         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2648 }
2649
2650 static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2651 {
2652         bnx2x_config_mf_bw(bp);
2653         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2654 }
2655
2656 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2657 {
2658         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2659
2660         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2661
2662                 /*
2663                  * This is the only place besides the function initialization
2664                  * where the bp->flags can change so it is done without any
2665                  * locks
2666                  */
2667                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2668                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2669                         bp->flags |= MF_FUNC_DIS;
2670
2671                         bnx2x_e1h_disable(bp);
2672                 } else {
2673                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2674                         bp->flags &= ~MF_FUNC_DIS;
2675
2676                         bnx2x_e1h_enable(bp);
2677                 }
2678                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2679         }
2680         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2681                 bnx2x_config_mf_bw(bp);
2682                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2683         }
2684
2685         /* Report results to MCP */
2686         if (dcc_event)
2687                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2688         else
2689                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2690 }
2691
2692 /* must be called under the spq lock */
2693 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2694 {
2695         struct eth_spe *next_spe = bp->spq_prod_bd;
2696
2697         if (bp->spq_prod_bd == bp->spq_last_bd) {
2698                 bp->spq_prod_bd = bp->spq;
2699                 bp->spq_prod_idx = 0;
2700                 DP(NETIF_MSG_TIMER, "end of spq\n");
2701         } else {
2702                 bp->spq_prod_bd++;
2703                 bp->spq_prod_idx++;
2704         }
2705         return next_spe;
2706 }
2707
2708 /* must be called under the spq lock */
2709 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2710 {
2711         int func = BP_FUNC(bp);
2712
2713         /* Make sure that BD data is updated before writing the producer */
2714         wmb();
2715
2716         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2717                  bp->spq_prod_idx);
2718         mmiowb();
2719 }
2720
2721 /* the slow path queue is odd since completions arrive on the fastpath ring */
2722 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2723                   u32 data_hi, u32 data_lo, int common)
2724 {
2725         struct eth_spe *spe;
2726         u16 type;
2727
2728 #ifdef BNX2X_STOP_ON_ERROR
2729         if (unlikely(bp->panic))
2730                 return -EIO;
2731 #endif
2732
2733         spin_lock_bh(&bp->spq_lock);
2734
2735         if (common) {
2736                 if (!atomic_read(&bp->eq_spq_left)) {
2737                         BNX2X_ERR("BUG! EQ ring full!\n");
2738                         spin_unlock_bh(&bp->spq_lock);
2739                         bnx2x_panic();
2740                         return -EBUSY;
2741                 }
2742         } else if (!atomic_read(&bp->cq_spq_left)) {
2743                 BNX2X_ERR("BUG! SPQ ring full!\n");
2744                 spin_unlock_bh(&bp->spq_lock);
2745                 bnx2x_panic();
2746                 return -EBUSY;
2747         }
2748
2749         spe = bnx2x_sp_get_next(bp);
2750
2751         /* CID needs the port number to be encoded in it */
2752         spe->hdr.conn_and_cmd_data =
2753                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2754                                     HW_CID(bp, cid));
2755
2756         if (common)
2757                 /* Common ramrods:
2758                  *      FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2759                  *      TRAFFIC_STOP, TRAFFIC_START
2760                  */
2761                 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2762                         & SPE_HDR_CONN_TYPE;
2763         else
2764                 /* ETH ramrods: SETUP, HALT */
2765                 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2766                         & SPE_HDR_CONN_TYPE;
2767
2768         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2769                  SPE_HDR_FUNCTION_ID);
2770
2771         spe->hdr.type = cpu_to_le16(type);
2772
2773         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2774         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2775
2776         /* stats ramrod has its own slot on the spq */
2777         if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2778                 /* It's ok if the actual decrement is issued towards the memory
2779                  * somewhere between the spin_lock and spin_unlock. Thus no
2780                  * more explicit memory barrier is needed.
2781                  */
2782                 if (common)
2783                         atomic_dec(&bp->eq_spq_left);
2784                 else
2785                         atomic_dec(&bp->cq_spq_left);
2786         }
2787
2788
2789         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2790            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
2791            "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
2792            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2793            (u32)(U64_LO(bp->spq_mapping) +
2794            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2795            HW_CID(bp, cid), data_hi, data_lo, type,
2796            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
2797
2798         bnx2x_sp_prod_update(bp);
2799         spin_unlock_bh(&bp->spq_lock);
2800         return 0;
2801 }
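
/*
 * Example (usage sketch): an ETH (non-common) ramrod that carries no data,
 * such as the HALT completion handled in bnx2x_sp_event() above, could be
 * posted as
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid, 0, 0, 0);
 *
 * while common ramrods pass common = 1 and consume eq_spq_left rather
 * than cq_spq_left.
 */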
2802
2803 /* acquire split MCP access lock register */
2804 static int bnx2x_acquire_alr(struct bnx2x *bp)
2805 {
2806         u32 j, val;
2807         int rc = 0;
2808
2809         might_sleep();
2810         for (j = 0; j < 1000; j++) {
2811                 val = (1UL << 31);
2812                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2813                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2814                 if (val & (1UL << 31))
2815                         break;
2816
2817                 msleep(5);
2818         }
2819         if (!(val & (1UL << 31))) {
2820                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2821                 rc = -EBUSY;
2822         }
2823
2824         return rc;
2825 }
2826
2827 /* release split MCP access lock register */
2828 static void bnx2x_release_alr(struct bnx2x *bp)
2829 {
2830         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2831 }
2832
2833 #define BNX2X_DEF_SB_ATT_IDX    0x0001
2834 #define BNX2X_DEF_SB_IDX        0x0002
2835
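/* Compare the driver's cached default status block indices with the ones
 * last written by the chip and cache any new values.  The return value is
 * a bitmask of BNX2X_DEF_SB_ATT_IDX and/or BNX2X_DEF_SB_IDX telling the
 * caller which parts of the default status block need servicing.
 */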
2836 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2837 {
2838         struct host_sp_status_block *def_sb = bp->def_status_blk;
2839         u16 rc = 0;
2840
2841         barrier(); /* status block is written to by the chip */
2842         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2843                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2844                 rc |= BNX2X_DEF_SB_ATT_IDX;
2845         }
2846
2847         if (bp->def_idx != def_sb->sp_sb.running_index) {
2848                 bp->def_idx = def_sb->sp_sb.running_index;
2849                 rc |= BNX2X_DEF_SB_IDX;
2850         }
2851
2852         /* Do not reorder: the index reads must complete before handling */
2853         barrier();
2854         return rc;
2855 }
2856
2857 /*
2858  * slow path service functions
2859  */
2860
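/* Handle newly asserted attention bits: mask them in the AEU so they are
 * not re-reported, record them in bp->attn_state, service the hard-wired
 * sources (NIG link events, GPIOs, general attentions) and finally ack
 * the bits towards the HC or IGU interrupt block.
 */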
2861 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2862 {
2863         int port = BP_PORT(bp);
2864         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2865                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2866         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2867                                        NIG_REG_MASK_INTERRUPT_PORT0;
2868         u32 aeu_mask;
2869         u32 nig_mask = 0;
2870         u32 reg_addr;
2871
2872         if (bp->attn_state & asserted)
2873                 BNX2X_ERR("IGU ERROR\n");
2874
2875         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2876         aeu_mask = REG_RD(bp, aeu_addr);
2877
2878         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2879            aeu_mask, asserted);
2880         aeu_mask &= ~(asserted & 0x3ff);
2881         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2882
2883         REG_WR(bp, aeu_addr, aeu_mask);
2884         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2885
2886         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2887         bp->attn_state |= asserted;
2888         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2889
2890         if (asserted & ATTN_HARD_WIRED_MASK) {
2891                 if (asserted & ATTN_NIG_FOR_FUNC) {
2892
2893                         bnx2x_acquire_phy_lock(bp);
2894
2895                         /* save nig interrupt mask */
2896                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2897                         REG_WR(bp, nig_int_mask_addr, 0);
2898
2899                         bnx2x_link_attn(bp);
2900
2901                         /* handle unicore attn? */
2902                 }
2903                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2904                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2905
2906                 if (asserted & GPIO_2_FUNC)
2907                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2908
2909                 if (asserted & GPIO_3_FUNC)
2910                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2911
2912                 if (asserted & GPIO_4_FUNC)
2913                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2914
2915                 if (port == 0) {
2916                         if (asserted & ATTN_GENERAL_ATTN_1) {
2917                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2918                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2919                         }
2920                         if (asserted & ATTN_GENERAL_ATTN_2) {
2921                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2922                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2923                         }
2924                         if (asserted & ATTN_GENERAL_ATTN_3) {
2925                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2926                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2927                         }
2928                 } else {
2929                         if (asserted & ATTN_GENERAL_ATTN_4) {
2930                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2931                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2932                         }
2933                         if (asserted & ATTN_GENERAL_ATTN_5) {
2934                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2935                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2936                         }
2937                         if (asserted & ATTN_GENERAL_ATTN_6) {
2938                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2939                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2940                         }
2941                 }
2942
2943         } /* if hardwired */
2944
2945         if (bp->common.int_block == INT_BLOCK_HC)
2946                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2947                             COMMAND_REG_ATTN_BITS_SET);
2948         else
2949                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2950
2951         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2952            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2953         REG_WR(bp, reg_addr, asserted);
2954
2955         /* now set back the mask */
2956         if (asserted & ATTN_NIG_FOR_FUNC) {
2957                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2958                 bnx2x_release_phy_lock(bp);
2959         }
2960 }
2961
2962 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2963 {
2964         int port = BP_PORT(bp);
2965         u32 ext_phy_config;
2966         /* mark the failure */
2967         ext_phy_config =
2968                 SHMEM_RD(bp,
2969                          dev_info.port_hw_config[port].external_phy_config);
2970
2971         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2972         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2973         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2974                  ext_phy_config);
2975
2976         /* log the failure */
2977         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2978                " the driver to shut down the card to prevent permanent"
2979                " damage.  Please contact OEM Support for assistance\n");
2980 }
2981
2982 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2983 {
2984         int port = BP_PORT(bp);
2985         int reg_offset;
2986         u32 val;
2987
2988         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2989                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2990
2991         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2992
2993                 val = REG_RD(bp, reg_offset);
2994                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2995                 REG_WR(bp, reg_offset, val);
2996
2997                 BNX2X_ERR("SPIO5 hw attention\n");
2998
2999                 /* Fan failure attention */
3000                 bnx2x_hw_reset_phy(&bp->link_params);
3001                 bnx2x_fan_failure(bp);
3002         }
3003
3004         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3005                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3006                 bnx2x_acquire_phy_lock(bp);
3007                 bnx2x_handle_module_detect_int(&bp->link_params);
3008                 bnx2x_release_phy_lock(bp);
3009         }
3010
3011         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3012
3013                 val = REG_RD(bp, reg_offset);
3014                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3015                 REG_WR(bp, reg_offset, val);
3016
3017                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3018                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3019                 bnx2x_panic();
3020         }
3021 }
3022
3023 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3024 {
3025         u32 val;
3026
3027         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3028
3029                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3030                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3031                 /* DORQ discard attention */
3032                 if (val & 0x2)
3033                         BNX2X_ERR("FATAL error from DORQ\n");
3034         }
3035
3036         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3037
3038                 int port = BP_PORT(bp);
3039                 int reg_offset;
3040
3041                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3042                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3043
3044                 val = REG_RD(bp, reg_offset);
3045                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3046                 REG_WR(bp, reg_offset, val);
3047
3048                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3049                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3050                 bnx2x_panic();
3051         }
3052 }
3053
3054 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3055 {
3056         u32 val;
3057
3058         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3059
3060                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3061                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3062                 /* CFC error attention */
3063                 if (val & 0x2)
3064                         BNX2X_ERR("FATAL error from CFC\n");
3065         }
3066
3067         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3068
3069                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3070                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3071                 /* RQ_USDMDP_FIFO_OVERFLOW */
3072                 if (val & 0x18000)
3073                         BNX2X_ERR("FATAL error from PXP\n");
3074                 if (CHIP_IS_E2(bp)) {
3075                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3076                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3077                 }
3078         }
3079
3080         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3081
3082                 int port = BP_PORT(bp);
3083                 int reg_offset;
3084
3085                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3087
3088                 val = REG_RD(bp, reg_offset);
3089                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090                 REG_WR(bp, reg_offset, val);
3091
3092                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3093                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3094                 bnx2x_panic();
3095         }
3096 }
3097
3098 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3099 {
3100         u32 val;
3101
3102         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3103
3104                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105                         int func = BP_FUNC(bp);
3106
3107                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3108                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3109                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3110                         val = SHMEM_RD(bp,
3111                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3112                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3113                                 bnx2x_dcc_event(bp,
3114                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3115
3116                         if (val & DRV_STATUS_SET_MF_BW)
3117                                 bnx2x_set_mf_bw(bp);
3118
3119                         bnx2x__link_status_update(bp);
3120                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3121                                 bnx2x_pmf_update(bp);
3122
3123                         if (bp->port.pmf &&
3124                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3125                                 bp->dcbx_enabled > 0)
3126                                 /* start dcbx state machine */
3127                                 bnx2x_dcbx_set_params(bp,
3128                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
3129                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3130
3131                         BNX2X_ERR("MC assert!\n");
3132                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3133                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3134                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3135                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3136                         bnx2x_panic();
3137
3138                 } else if (attn & BNX2X_MCP_ASSERT) {
3139
3140                         BNX2X_ERR("MCP assert!\n");
3141                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3142                         bnx2x_fw_dump(bp);
3143
3144                 } else
3145                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3146         }
3147
3148         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3149                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3150                 if (attn & BNX2X_GRC_TIMEOUT) {
3151                         val = CHIP_IS_E1(bp) ? 0 :
3152                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3153                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3154                 }
3155                 if (attn & BNX2X_GRC_RSV) {
3156                         val = CHIP_IS_E1(bp) ? 0 :
3157                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3158                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3159                 }
3160                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3161         }
3162 }
3163
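/* The recovery flow (presumably shared between the two functions of a chip)
 * keeps its state in the MISC generic register: the low LOAD_COUNTER_BITS
 * bits hold a load counter, and the bit directly above them flags a reset
 * in progress:
 *
 *      31            17 16 15                    0
 *     +----------------+--+-----------------------+
 *     |    (unused)    |RP|     load counter      |
 *     +----------------+--+-----------------------+
 */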
3164 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3165 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3166 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3168 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3169
3170 /*
3171  * should be run under rtnl lock
3172  */
3173 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174 {
3175         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178         barrier();
3179         mmiowb();
3180 }
3181
3182 /*
3183  * should be run under rtnl lock
3184  */
3185 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186 {
3187         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188         val |= (1 << RESET_DONE_FLAG_SHIFT);
3189         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190         barrier();
3191         mmiowb();
3192 }
3193
3194 /*
3195  * should be run under rtnl lock
3196  */
3197 bool bnx2x_reset_is_done(struct bnx2x *bp)
3198 {
3199         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201         return !(val & RESET_DONE_FLAG_MASK);
3202 }
3203
3204 /*
3205  * should be run under rtnl lock
3206  */
3207 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208 {
3209         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215         barrier();
3216         mmiowb();
3217 }
3218
3219 /*
3220  * should be run under rtnl lock
3221  */
3222 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223 {
3224         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230         barrier();
3231         mmiowb();
3232
3233         return val1;
3234 }
3235
3236 /*
3237  * should be run under rtnl lock
3238  */
3239 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240 {
3241         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242 }
3243
3244 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245 {
3246         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248 }
3249
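/* Helpers for reporting parity errors: each bnx2x_print_blocks_with_parityN
 * routine walks one AEU signal word bit by bit and appends the name of every
 * HW block with a pending parity error to the current printk line.  par_num
 * threads through the calls so that only the very first block is printed
 * without a leading comma.
 */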
3250 static inline void _print_next_block(int idx, const char *blk)
3251 {
3252         if (idx)
3253                 pr_cont(", ");
3254         pr_cont("%s", blk);
3255 }
3256
3257 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258 {
3259         int i = 0;
3260         u32 cur_bit = 0;
3261         for (i = 0; sig; i++) {
3262                 cur_bit = ((u32)0x1 << i);
3263                 if (sig & cur_bit) {
3264                         switch (cur_bit) {
3265                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266                                 _print_next_block(par_num++, "BRB");
3267                                 break;
3268                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269                                 _print_next_block(par_num++, "PARSER");
3270                                 break;
3271                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272                                 _print_next_block(par_num++, "TSDM");
3273                                 break;
3274                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275                                 _print_next_block(par_num++, "SEARCHER");
3276                                 break;
3277                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278                                 _print_next_block(par_num++, "TSEMI");
3279                                 break;
3280                         }
3281
3282                         /* Clear the bit */
3283                         sig &= ~cur_bit;
3284                 }
3285         }
3286
3287         return par_num;
3288 }
3289
3290 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291 {
3292         int i = 0;
3293         u32 cur_bit = 0;
3294         for (i = 0; sig; i++) {
3295                 cur_bit = ((u32)0x1 << i);
3296                 if (sig & cur_bit) {
3297                         switch (cur_bit) {
3298                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299                                 _print_next_block(par_num++, "PBCLIENT");
3300                                 break;
3301                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302                                 _print_next_block(par_num++, "QM");
3303                                 break;
3304                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305                                 _print_next_block(par_num++, "XSDM");
3306                                 break;
3307                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308                                 _print_next_block(par_num++, "XSEMI");
3309                                 break;
3310                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311                                 _print_next_block(par_num++, "DOORBELLQ");
3312                                 break;
3313                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314                                 _print_next_block(par_num++, "VAUX PCI CORE");
3315                                 break;
3316                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317                                 _print_next_block(par_num++, "DEBUG");
3318                                 break;
3319                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320                                 _print_next_block(par_num++, "USDM");
3321                                 break;
3322                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323                                 _print_next_block(par_num++, "USEMI");
3324                                 break;
3325                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326                                 _print_next_block(par_num++, "UPB");
3327                                 break;
3328                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329                                 _print_next_block(par_num++, "CSDM");
3330                                 break;
3331                         }
3332
3333                         /* Clear the bit */
3334                         sig &= ~cur_bit;
3335                 }
3336         }
3337
3338         return par_num;
3339 }
3340
3341 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342 {
3343         int i = 0;
3344         u32 cur_bit = 0;
3345         for (i = 0; sig; i++) {
3346                 cur_bit = ((u32)0x1 << i);
3347                 if (sig & cur_bit) {
3348                         switch (cur_bit) {
3349                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350                                 _print_next_block(par_num++, "CSEMI");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "PXP");
3354                                 break;
3355                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356                                 _print_next_block(par_num++,
3357                                         "PXPPCICLOCKCLIENT");
3358                                 break;
3359                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360                                 _print_next_block(par_num++, "CFC");
3361                                 break;
3362                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363                                 _print_next_block(par_num++, "CDU");
3364                                 break;
3365                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366                                 _print_next_block(par_num++, "IGU");
3367                                 break;
3368                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369                                 _print_next_block(par_num++, "MISC");
3370                                 break;
3371                         }
3372
3373                         /* Clear the bit */
3374                         sig &= ~cur_bit;
3375                 }
3376         }
3377
3378         return par_num;
3379 }
3380
3381 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382 {
3383         int i = 0;
3384         u32 cur_bit = 0;
3385         for (i = 0; sig; i++) {
3386                 cur_bit = ((u32)0x1 << i);
3387                 if (sig & cur_bit) {
3388                         switch (cur_bit) {
3389                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390                                 _print_next_block(par_num++, "MCP ROM");
3391                                 break;
3392                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393                                 _print_next_block(par_num++, "MCP UMP RX");
3394                                 break;
3395                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396                                 _print_next_block(par_num++, "MCP UMP TX");
3397                                 break;
3398                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399                                 _print_next_block(par_num++, "MCP SCPAD");
3400                                 break;
3401                         }
3402
3403                         /* Clear the bit */
3404                         sig &= ~cur_bit;
3405                 }
3406         }
3407
3408         return par_num;
3409 }
3410
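/* Returns true if any of the four AEU after-invert words carries a parity
 * attention, logging the affected blocks along the way.
 */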
3411 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412                                      u32 sig2, u32 sig3)
3413 {
3414         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416                 int par_num = 0;
3417                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3418                         "[0]:0x%08x [1]:0x%08x "
3419                         "[2]:0x%08x [3]:0x%08x\n",
3420                           sig0 & HW_PRTY_ASSERT_SET_0,
3421                           sig1 & HW_PRTY_ASSERT_SET_1,
3422                           sig2 & HW_PRTY_ASSERT_SET_2,
3423                           sig3 & HW_PRTY_ASSERT_SET_3);
3424                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3425                        bp->dev->name);
3426                 par_num = bnx2x_print_blocks_with_parity0(
3427                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428                 par_num = bnx2x_print_blocks_with_parity1(
3429                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430                 par_num = bnx2x_print_blocks_with_parity2(
3431                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432                 par_num = bnx2x_print_blocks_with_parity3(
3433                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434                 pr_cont("\n");
3435                 return true;
3436         } else
3437                 return false;
3438 }
3439
3440 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3441 {
3442         struct attn_route attn;
3443         int port = BP_PORT(bp);
3444
3445         attn.sig[0] = REG_RD(bp,
3446                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3447                              port*4);
3448         attn.sig[1] = REG_RD(bp,
3449                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3450                              port*4);
3451         attn.sig[2] = REG_RD(bp,
3452                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3453                              port*4);
3454         attn.sig[3] = REG_RD(bp,
3455                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3456                              port*4);
3457
3458         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3459                                         attn.sig[3]);
3460 }
3461
3462
3463 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3464 {
3465         u32 val;
3466         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3467
3468                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3469                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3470                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3471                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3472                                   "ADDRESS_ERROR\n");
3473                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3474                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3475                                   "INCORRECT_RCV_BEHAVIOR\n");
3476                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3477                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3478                                   "WAS_ERROR_ATTN\n");
3479                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3480                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3481                                   "VF_LENGTH_VIOLATION_ATTN\n");
3482                 if (val &
3483                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3484                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3485                                   "VF_GRC_SPACE_VIOLATION_ATTN\n");
3486                 if (val &
3487                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3488                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3489                                   "VF_MSIX_BAR_VIOLATION_ATTN\n");
3490                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3491                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3492                                   "TCPL_ERROR_ATTN\n");
3493                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3494                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3495                                   "TCPL_IN_TWO_RCBS_ATTN\n");
3496                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3497                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3498                                   "CSSNOOP_FIFO_OVERFLOW\n");
3499         }
3500         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3501                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3502                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3503                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3504                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3505                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3506                         BNX2X_ERR("ATC_ATC_INT_STS_REG"
3507                                   "_ATC_TCPL_TO_NOT_PEND\n");
3508                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3509                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3510                                   "ATC_GPA_MULTIPLE_HITS\n");
3511                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3512                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3513                                   "ATC_RCPL_TO_EMPTY_CNT\n");
3514                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3515                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3516                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3517                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3518                                   "ATC_IREQ_LESS_THAN_STU\n");
3519         }
3520
3521         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3522                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3523                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3524                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3525                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3526         }
3527
3528 }
3529
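/* Handle de-asserted attention bits: if a parity error is detected kick off
 * the recovery flow and return; otherwise dispatch the after-invert signals
 * to the per-group handlers, ack the bits towards the HC/IGU and unmask
 * them again in the AEU.
 */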
3530 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3531 {
3532         struct attn_route attn, *group_mask;
3533         int port = BP_PORT(bp);
3534         int index;
3535         u32 reg_addr;
3536         u32 val;
3537         u32 aeu_mask;
3538
3539         /* need to take the HW lock because the MCP or the other port
3540            might also try to handle this event */
3541         bnx2x_acquire_alr(bp);
3542
3543         if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3544                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3545                 bnx2x_set_reset_in_progress(bp);
3546                 schedule_delayed_work(&bp->reset_task, 0);
3547                 /* Disable HW interrupts */
3548                 bnx2x_int_disable(bp);
3549                 bnx2x_release_alr(bp);
3550                 /* In case of parity errors don't handle the attentions so
3551                  * that the other function can also "see" the parity errors.
3552                  */
3553                 return;
3554         }
3555
3556         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3557         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3558         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3559         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3560         if (CHIP_IS_E2(bp))
3561                 attn.sig[4] =
3562                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3563         else
3564                 attn.sig[4] = 0;
3565
3566         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3567            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3568
3569         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3570                 if (deasserted & (1 << index)) {
3571                         group_mask = &bp->attn_group[index];
3572
3573                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3574                                          "%08x %08x %08x\n",
3575                            index,
3576                            group_mask->sig[0], group_mask->sig[1],
3577                            group_mask->sig[2], group_mask->sig[3],
3578                            group_mask->sig[4]);
3579
3580                         bnx2x_attn_int_deasserted4(bp,
3581                                         attn.sig[4] & group_mask->sig[4]);
3582                         bnx2x_attn_int_deasserted3(bp,
3583                                         attn.sig[3] & group_mask->sig[3]);
3584                         bnx2x_attn_int_deasserted1(bp,
3585                                         attn.sig[1] & group_mask->sig[1]);
3586                         bnx2x_attn_int_deasserted2(bp,
3587                                         attn.sig[2] & group_mask->sig[2]);
3588                         bnx2x_attn_int_deasserted0(bp,
3589                                         attn.sig[0] & group_mask->sig[0]);
3590                 }
3591         }
3592
3593         bnx2x_release_alr(bp);
3594
3595         if (bp->common.int_block == INT_BLOCK_HC)
3596                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3597                             COMMAND_REG_ATTN_BITS_CLR);
3598         else
3599                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3600
3601         val = ~deasserted;
3602         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3603            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3604         REG_WR(bp, reg_addr, val);
3605
3606         if (~bp->attn_state & deasserted)
3607                 BNX2X_ERR("IGU ERROR\n");
3608
3609         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3610                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3611
3612         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3613         aeu_mask = REG_RD(bp, reg_addr);
3614
3615         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3616            aeu_mask, deasserted);
3617         aeu_mask |= (deasserted & 0x3ff);
3618         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3619
3620         REG_WR(bp, reg_addr, aeu_mask);
3621         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3622
3623         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3624         bp->attn_state &= ~deasserted;
3625         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3626 }
3627
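/* Dispatch attention interrupts: compare the attn bits reported in the
 * default status block with their ack copy and the driver's attn_state to
 * derive which bits were newly asserted and which were de-asserted.
 */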
3628 static void bnx2x_attn_int(struct bnx2x *bp)
3629 {
3630         /* read local copy of bits */
3631         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3632                                                                 attn_bits);
3633         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3634                                                                 attn_bits_ack);
3635         u32 attn_state = bp->attn_state;
3636
3637         /* look for changed bits */
3638         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3639         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3640
3641         DP(NETIF_MSG_HW,
3642            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3643            attn_bits, attn_ack, asserted, deasserted);
3644
3645         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3646                 BNX2X_ERR("BAD attention state\n");
3647
3648         /* handle bits that were raised */
3649         if (asserted)
3650                 bnx2x_attn_int_asserted(bp, asserted);
3651
3652         if (deasserted)
3653                 bnx2x_attn_int_deasserted(bp, deasserted);
3654 }
3655
3656 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3657 {
3658         /* No memory barriers */
3659         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3660         mmiowb(); /* keep prod updates ordered */
3661 }
3662
3663 #ifdef BCM_CNIC
3664 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3665                                       union event_ring_elem *elem)
3666 {
3667         if (!bp->cnic_eth_dev.starting_cid  ||
3668             cid < bp->cnic_eth_dev.starting_cid)
3669                 return 1;
3670
3671         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3672
3673         if (unlikely(elem->message.data.cfc_del_event.error)) {
3674                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3675                           cid);
3676                 bnx2x_panic_dump(bp);
3677         }
3678         bnx2x_cnic_cfc_comp(bp, cid);
3679         return 0;
3680 }
3681 #endif
3682
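/* Service the event ring: walk the elements between the cached consumer and
 * the hw consumer taken from the status block, dispatch each completion by
 * opcode (and, for ramrods, by the current bp->state), then return the
 * consumed credits to eq_spq_left and update the producer in FW memory.
 */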
3683 static void bnx2x_eq_int(struct bnx2x *bp)
3684 {
3685         u16 hw_cons, sw_cons, sw_prod;
3686         union event_ring_elem *elem;
3687         u32 cid;
3688         u8 opcode;
3689         int spqe_cnt = 0;
3690
3691         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3692
3693         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3694          * When we get to the next-page we need to adjust so the loop
3695          * condition below will be met. The next element is the size of a
3696          * regular element and hence we increment by 1.
3697          */
3698         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3699                 hw_cons++;
3700
3701         /* This function may never run in parallel with itself for a
3702          * specific bp, thus there is no need for a "paired" read memory
3703          * barrier here.
3704          */
3705         sw_cons = bp->eq_cons;
3706         sw_prod = bp->eq_prod;
3707
3708         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %u\n",
3709                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3710
3711         for (; sw_cons != hw_cons;
3712               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3713
3714
3715                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3716
3717                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3718                 opcode = elem->message.opcode;
3719
3720
3721                 /* handle eq element */
3722                 switch (opcode) {
3723                 case EVENT_RING_OPCODE_STAT_QUERY:
3724                         DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3725                         /* nothing to do with stats comp */
3726                         continue;
3727
3728                 case EVENT_RING_OPCODE_CFC_DEL:
3729                         /* handle according to cid range */
3730                         /*
3731                          * we may want to verify here that the bp state is
3732                          * HALTING
3733                          */
3734                         DP(NETIF_MSG_IFDOWN,
3735                            "got delete ramrod for MULTI[%d]\n", cid);
3736 #ifdef BCM_CNIC
3737                         if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3738                                 goto next_spqe;
3739                         if (cid == BNX2X_FCOE_ETH_CID)
3740                                 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3741                         else
3742 #endif
3743                                 bnx2x_fp(bp, cid, state) =
3744                                                 BNX2X_FP_STATE_CLOSED;
3745
3746                         goto next_spqe;
3747
3748                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3749                         DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3750                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3751                         goto next_spqe;
3752                 case EVENT_RING_OPCODE_START_TRAFFIC:
3753                         DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3754                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3755                         goto next_spqe;
3756                 }
3757
3758                 switch (opcode | bp->state) {
3759                 case (EVENT_RING_OPCODE_FUNCTION_START |
3760                       BNX2X_STATE_OPENING_WAIT4_PORT):
3761                         DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3762                         bp->state = BNX2X_STATE_FUNC_STARTED;
3763                         break;
3764
3765                 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3766                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3767                         DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3768                         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3769                         break;
3770
3771                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3772                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3773                         DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3774                         if (elem->message.data.set_mac_event.echo)
3775                                 bp->set_mac_pending = 0;
3776                         break;
3777
3778                 case (EVENT_RING_OPCODE_SET_MAC |
3779                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3780                         DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3781                         if (elem->message.data.set_mac_event.echo)
3782                                 bp->set_mac_pending = 0;
3783                         break;
3784                 default:
3785                         /* unknown event: log the error and continue */
3786                         BNX2X_ERR("Unknown EQ event %d\n",
3787                                   elem->message.opcode);
3788                 }
3789 next_spqe:
3790                 spqe_cnt++;
3791         } /* for */
3792
3793         smp_mb__before_atomic_inc();
3794         atomic_add(spqe_cnt, &bp->eq_spq_left);
3795
3796         bp->eq_cons = sw_cons;
3797         bp->eq_prod = sw_prod;
3798         /* Make sure the above memory writes complete before the producer update */
3799         smp_wmb();
3800
3801         /* update producer */
3802         bnx2x_update_eq_prod(bp, bp->eq_prod);
3803 }
3804
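/* Slow path task: runs from the driver workqueue, determines from the
 * default status block whether HW attentions and/or slow path events are
 * pending, handles them and acks the status block indices back to the IGU.
 */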
3805 static void bnx2x_sp_task(struct work_struct *work)
3806 {
3807         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3808         u16 status;
3809
3810         /* Return here if interrupt is disabled */
3811         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3812                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3813                 return;
3814         }
3815
3816         status = bnx2x_update_dsb_idx(bp);
3817 /*      if (status == 0)                                     */
3818 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3819
3820         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3821
3822         /* HW attentions */
3823         if (status & BNX2X_DEF_SB_ATT_IDX) {
3824                 bnx2x_attn_int(bp);
3825                 status &= ~BNX2X_DEF_SB_ATT_IDX;
3826         }
3827
3828         /* SP events: STAT_QUERY and others */
3829         if (status & BNX2X_DEF_SB_IDX) {
3830 #ifdef BCM_CNIC
3831                 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
3832
3833                 if ((!NO_FCOE(bp)) &&
3834                         (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3835                         napi_schedule(&bnx2x_fcoe(bp, napi));
3836 #endif
3837                 /* Handle EQ completions */
3838                 bnx2x_eq_int(bp);
3839
3840                 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3841                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3842
3843                 status &= ~BNX2X_DEF_SB_IDX;
3844         }
3845
3846         if (unlikely(status))
3847                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3848                    status);
3849
3850         bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3851              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3852 }
3853
3854 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3855 {
3856         struct net_device *dev = dev_instance;
3857         struct bnx2x *bp = netdev_priv(dev);
3858
3859         /* Return here if interrupt is disabled */
3860         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3861                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3862                 return IRQ_HANDLED;
3863         }
3864
3865         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3866                      IGU_INT_DISABLE, 0);
3867
3868 #ifdef BNX2X_STOP_ON_ERROR
3869         if (unlikely(bp->panic))
3870                 return IRQ_HANDLED;
3871 #endif
3872
3873 #ifdef BCM_CNIC
3874         {
3875                 struct cnic_ops *c_ops;
3876
3877                 rcu_read_lock();
3878                 c_ops = rcu_dereference(bp->cnic_ops);
3879                 if (c_ops)
3880                         c_ops->cnic_handler(bp->cnic_data, NULL);
3881                 rcu_read_unlock();
3882         }
3883 #endif
3884         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3885
3886         return IRQ_HANDLED;
3887 }
3888
3889 /* end of slow path */
3890
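/* Periodic timer: in poll mode it drives the fastpath of queue 0 by hand,
 * and in any case it maintains the driver/MCP heartbeat.  The driver pulse
 * should stay within one sequence number of the MCP response - see the
 * check below.
 */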
3891 static void bnx2x_timer(unsigned long data)
3892 {
3893         struct bnx2x *bp = (struct bnx2x *) data;
3894
3895         if (!netif_running(bp->dev))
3896                 return;
3897
3898         if (atomic_read(&bp->intr_sem) != 0)
3899                 goto timer_restart;
3900
3901         if (poll) {
3902                 struct bnx2x_fastpath *fp = &bp->fp[0];
3903                 int rc;
3904
3905                 bnx2x_tx_int(fp);
3906                 rc = bnx2x_rx_int(fp, 1000);
3907         }
3908
3909         if (!BP_NOMCP(bp)) {
3910                 int mb_idx = BP_FW_MB_IDX(bp);
3911                 u32 drv_pulse;
3912                 u32 mcp_pulse;
3913
3914                 ++bp->fw_drv_pulse_wr_seq;
3915                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3916                 /* TBD - add SYSTEM_TIME */
3917                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3918                 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3919
3920                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3921                              MCP_PULSE_SEQ_MASK);
3922                 /* The delta between driver pulse and mcp response
3923                  * should be 1 (before mcp response) or 0 (after mcp response)
3924                  */
3925                 if ((drv_pulse != mcp_pulse) &&
3926                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3927                         /* someone lost a heartbeat... */
3928                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3929                                   drv_pulse, mcp_pulse);
3930                 }
3931         }
3932
3933         if (bp->state == BNX2X_STATE_OPEN)
3934                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3935
3936 timer_restart:
3937         mod_timer(&bp->timer, jiffies + bp->current_interval);
3938 }
3939
3940 /* end of Statistics */
3941
3942 /* nic init */
3943
3944 /*
3945  * nic init service functions
3946  */
3947
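/* Fill a BAR region with a constant: use dword writes when both the address
 * and the length are 4-byte aligned, byte writes otherwise.
 */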
3948 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3949 {
3950         u32 i;
3951         if (!(len%4) && !(addr%4))
3952                 for (i = 0; i < len; i += 4)
3953                         REG_WR(bp, addr + i, fill);
3954         else
3955                 for (i = 0; i < len; i++)
3956                         REG_WR8(bp, addr + i, fill);
3957
3958 }
3959
3960 /* helper: writes FP SB data to FW - data_size in dwords */
3961 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3962                                        int fw_sb_id,
3963                                        u32 *sb_data_p,
3964                                        u32 data_size)
3965 {
3966         int index;
3967         for (index = 0; index < data_size; index++)
3968                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3969                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3970                         sizeof(u32)*index,
3971                         *(sb_data_p + index));
3972 }
3973
3974 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3975 {
3976         u32 *sb_data_p;
3977         u32 data_size = 0;
3978         struct hc_status_block_data_e2 sb_data_e2;
3979         struct hc_status_block_data_e1x sb_data_e1x;
3980
3981         /* disable the function first */
3982         if (CHIP_IS_E2(bp)) {
3983                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3984                 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3985                 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3986                 sb_data_e2.common.p_func.vf_valid = false;
3987                 sb_data_p = (u32 *)&sb_data_e2;
3988                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3989         } else {
3990                 memset(&sb_data_e1x, 0,
3991                        sizeof(struct hc_status_block_data_e1x));
3992                 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3993                 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3994                 sb_data_e1x.common.p_func.vf_valid = false;
3995                 sb_data_p = (u32 *)&sb_data_e1x;
3996                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3997         }
3998         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3999
4000         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4001                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
4002                         CSTORM_STATUS_BLOCK_SIZE);
4003         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4004                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
4005                         CSTORM_SYNC_BLOCK_SIZE);
4006 }
4007
4008 /* helper:  writes SP SB data to FW */
4009 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4010                 struct hc_sp_status_block_data *sp_sb_data)
4011 {
4012         int func = BP_FUNC(bp);
4013         int i;
4014         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4015                 REG_WR(bp, BAR_CSTRORM_INTMEM +
4016                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4017                         i*sizeof(u32),
4018                         *((u32 *)sp_sb_data + i));
4019 }
4020
4021 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4022 {
4023         int func = BP_FUNC(bp);
4024         struct hc_sp_status_block_data sp_sb_data;
4025         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4026
4027         sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4028         sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4029         sp_sb_data.p_func.vf_valid = false;
4030
4031         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4032
4033         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4034                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4035                         CSTORM_SP_STATUS_BLOCK_SIZE);
4036         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4037                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4038                         CSTORM_SP_SYNC_BLOCK_SIZE);
4039
4040 }
4041
4042
4043 static inline
4044 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4045                                            int igu_sb_id, int igu_seg_id)
4046 {
4047         hc_sm->igu_sb_id = igu_sb_id;
4048         hc_sm->igu_seg_id = igu_seg_id;
4049         hc_sm->timer_value = 0xFF;
4050         hc_sm->time_to_expire = 0xFFFFFFFF;
4051 }
4052
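/* Initialize a fastpath status block in FW memory: zero it first, then fill
 * in the per-function data (chip dependent - E2 vs E1x layouts), set up the
 * RX and TX state machines and write the result to the CSTORM.
 */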
4053 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4054                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
4055 {
4056         int igu_seg_id;
4057
4058         struct hc_status_block_data_e2 sb_data_e2;
4059         struct hc_status_block_data_e1x sb_data_e1x;
4060         struct hc_status_block_sm  *hc_sm_p;
4061         struct hc_index_data *hc_index_p;
4062         int data_size;
4063         u32 *sb_data_p;
4064
4065         if (CHIP_INT_MODE_IS_BC(bp))
4066                 igu_seg_id = HC_SEG_ACCESS_NORM;
4067         else
4068                 igu_seg_id = IGU_SEG_ACCESS_NORM;
4069
4070         bnx2x_zero_fp_sb(bp, fw_sb_id);
4071
4072         if (CHIP_IS_E2(bp)) {
4073                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4074                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4075                 sb_data_e2.common.p_func.vf_id = vfid;
4076                 sb_data_e2.common.p_func.vf_valid = vf_valid;
4077                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4078                 sb_data_e2.common.same_igu_sb_1b = true;
4079                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4080                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4081                 hc_sm_p = sb_data_e2.common.state_machine;
4082                 hc_index_p = sb_data_e2.index_data;
4083                 sb_data_p = (u32 *)&sb_data_e2;
4084                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4085         } else {
4086                 memset(&sb_data_e1x, 0,
4087                        sizeof(struct hc_status_block_data_e1x));
4088                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4089                 sb_data_e1x.common.p_func.vf_id = 0xff;
4090                 sb_data_e1x.common.p_func.vf_valid = false;
4091                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4092                 sb_data_e1x.common.same_igu_sb_1b = true;
4093                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4094                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4095                 hc_sm_p = sb_data_e1x.common.state_machine;
4096                 hc_index_p = sb_data_e1x.index_data;
4097                 sb_data_p = (u32 *)&sb_data_e1x;
4098                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4099         }
4100
4101         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4102                                        igu_sb_id, igu_seg_id);
4103         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4104                                        igu_sb_id, igu_seg_id);
4105
4106         DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4107
4108         /* write indices to HW */
4109         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4110 }
4111
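/* Program one coalescing index: the timeout is expressed in usec and
 * converted to BNX2X_BTR-sized ticks; a zero timeout implicitly disables
 * coalescing for this index.
 */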
4112 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4113                                         u8 sb_index, u8 disable, u16 usec)
4114 {
4115         int port = BP_PORT(bp);
4116         u8 ticks = usec / BNX2X_BTR;
4117
4118         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4119
4120         disable = disable ? 1 : (usec ? 0 : 1);
4121         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4122 }
4123
4124 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4125                                      u16 tx_usec, u16 rx_usec)
4126 {
4127         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4128                                     false, rx_usec);
4129         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4130                                     false, tx_usec);
4131 }
4132
4133 static void bnx2x_init_def_sb(struct bnx2x *bp)
4134 {
4135         struct host_sp_status_block *def_sb = bp->def_status_blk;
4136         dma_addr_t mapping = bp->def_status_blk_mapping;
4137         int igu_sp_sb_index;
4138         int igu_seg_id;
4139         int port = BP_PORT(bp);
4140         int func = BP_FUNC(bp);
4141         int reg_offset;
4142         u64 section;
4143         int index;
4144         struct hc_sp_status_block_data sp_sb_data;
4145         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4146
4147         if (CHIP_INT_MODE_IS_BC(bp)) {
4148                 igu_sp_sb_index = DEF_SB_IGU_ID;
4149                 igu_seg_id = HC_SEG_ACCESS_DEF;
4150         } else {
4151                 igu_sp_sb_index = bp->igu_dsb_id;
4152                 igu_seg_id = IGU_SEG_ACCESS_DEF;
4153         }
4154
4155         /* ATTN */
4156         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4157                                             atten_status_block);
4158         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4159
4160         bp->attn_state = 0;
4161
4162         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4163                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4164         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4165                 int sindex;
4166                 /* take care of sig[0]..sig[3] */
4167                 for (sindex = 0; sindex < 4; sindex++)
4168                         bp->attn_group[index].sig[sindex] =
4169                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4170
4171                 if (CHIP_IS_E2(bp))
4172                         /*
4173                          * enable5 is separate from the rest of the registers,
4174                          * and therefore the address skip is 4
4175                          * and not 16 between the different groups
4176                          */
4177                         bp->attn_group[index].sig[4] = REG_RD(bp,
4178                                         reg_offset + 0x10 + 0x4*index);
4179                 else
4180                         bp->attn_group[index].sig[4] = 0;
4181         }
4182
4183         if (bp->common.int_block == INT_BLOCK_HC) {
4184                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4185                                      HC_REG_ATTN_MSG0_ADDR_L);
4186
4187                 REG_WR(bp, reg_offset, U64_LO(section));
4188                 REG_WR(bp, reg_offset + 4, U64_HI(section));
4189         } else if (CHIP_IS_E2(bp)) {
4190                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4191                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4192         }
4193
4194         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4195                                             sp_sb);
4196
4197         bnx2x_zero_sp_sb(bp);
4198
4199         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
4200         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
4201         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
4202         sp_sb_data.igu_seg_id           = igu_seg_id;
4203         sp_sb_data.p_func.pf_id         = func;
4204         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
4205         sp_sb_data.p_func.vf_id         = 0xff;
4206
4207         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4208
4209         bp->stats_pending = 0;
4210         bp->set_mac_pending = 0;
4211
4212         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4213 }
4214
4215 void bnx2x_update_coalesce(struct bnx2x *bp)
4216 {
4217         int i;
4218
4219         for_each_eth_queue(bp, i)
4220                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4221                                          bp->rx_ticks, bp->tx_ticks);
4222 }
4223
4224 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4225 {
4226         spin_lock_init(&bp->spq_lock);
4227         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
4228
4229         bp->spq_prod_idx = 0;
4230         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4231         bp->spq_prod_bd = bp->spq;
4232         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4233 }
4234
4235 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4236 {
4237         int i;
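             /* The last element of each EQ page holds a next-page pointer:
              * chain page i to page i + 1 and wrap the last page back to
              * the first one.
              */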
4238         for (i = 1; i <= NUM_EQ_PAGES; i++) {
4239                 union event_ring_elem *elem =
4240                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4241
4242                 elem->next_page.addr.hi =
4243                         cpu_to_le32(U64_HI(bp->eq_mapping +
4244                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4245                 elem->next_page.addr.lo =
4246                         cpu_to_le32(U64_LO(bp->eq_mapping +
4247                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4248         }
4249         bp->eq_cons = 0;
4250         bp->eq_prod = NUM_EQ_DESC;
4251         bp->eq_cons_sb = BNX2X_EQ_INDEX;
4252         /* leave one entry of headroom so we get a warning before the EQ overflows */
4253         atomic_set(&bp->eq_spq_left,
4254                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4255 }
4256
4257 static void bnx2x_init_ind_table(struct bnx2x *bp)
4258 {
4259         int func = BP_FUNC(bp);
4260         int i;
4261
4262         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4263                 return;
4264
4265         DP(NETIF_MSG_IFUP,
4266            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
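             /* Spread the table entries round-robin across the ETH client
              * ids; the NONE_ETH_CONTEXT_USE non-ETH contexts are excluded
              * from the modulo.
              */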
4267         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4268                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4269                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4270                         bp->fp->cl_id + (i % (bp->num_queues -
4271                                 NONE_ETH_CONTEXT_USE)));
4272 }
4273
4274 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4275 {
4276         int mode = bp->rx_mode;
4277         int port = BP_PORT(bp);
4278         u16 cl_id;
4279         u32 def_q_filters = 0;
4280
4281         /* All but management unicast packets should pass to the host as well */
4282         u32 llh_mask =
4283                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4284                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4285                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4286                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4287
4288         switch (mode) {
4289         case BNX2X_RX_MODE_NONE: /* no Rx */
4290                 def_q_filters = BNX2X_ACCEPT_NONE;
4291 #ifdef BCM_CNIC
4292                 if (!NO_FCOE(bp)) {
4293                         cl_id = bnx2x_fcoe(bp, cl_id);
4294                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4295                 }
4296 #endif
4297                 break;
4298
4299         case BNX2X_RX_MODE_NORMAL:
4300                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4301                                 BNX2X_ACCEPT_MULTICAST;
4302 #ifdef BCM_CNIC
4303                 if (!NO_FCOE(bp)) {
4304                         cl_id = bnx2x_fcoe(bp, cl_id);
4305                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4306                                                   BNX2X_ACCEPT_UNICAST |
4307                                                   BNX2X_ACCEPT_MULTICAST);
4308                 }
4309 #endif
4310                 break;
4311
4312         case BNX2X_RX_MODE_ALLMULTI:
4313                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4314                                 BNX2X_ACCEPT_ALL_MULTICAST;
4315 #ifdef BCM_CNIC
4316                 /*
4317                  *  Prevent duplication of multicast packets by configuring FCoE
4318                  *  L2 Client to receive only matched unicast frames.
4319                  */
4320                 if (!NO_FCOE(bp)) {
4321                         cl_id = bnx2x_fcoe(bp, cl_id);
4322                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4323                                                   BNX2X_ACCEPT_UNICAST);
4324                 }
4325 #endif
4326                 break;
4327
4328         case BNX2X_RX_MODE_PROMISC:
4329                 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4330 #ifdef BCM_CNIC
4331                 /*
4332                  *  Prevent packet duplication by configuring DROP_ALL for the
4333                  *  FCoE L2 Client.
4334                  */
4335                 if (!NO_FCOE(bp)) {
4336                         cl_id = bnx2x_fcoe(bp, cl_id);
4337                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4338                 }
4339 #endif
4340                 /* pass management unicast packets as well */
4341                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4342                 break;
4343
4344         default:
4345                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4346                 break;
4347         }
4348
4349         cl_id = BP_L_ID(bp);
4350         bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4351
4352         REG_WR(bp,
4353                (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4354                        NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
4355
4356         DP(NETIF_MSG_IFUP, "rx mode %d\n"
4357                 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4358                 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4359                 "unmatched_ucast 0x%x\n", mode,
4360                 bp->mac_filters.ucast_drop_all,
4361                 bp->mac_filters.mcast_drop_all,
4362                 bp->mac_filters.bcast_drop_all,
4363                 bp->mac_filters.ucast_accept_all,
4364                 bp->mac_filters.mcast_accept_all,
4365                 bp->mac_filters.bcast_accept_all,
4366                 bp->mac_filters.unmatched_unicast
4367         );
4368
4369         storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4370 }
4371
4372 static void bnx2x_init_internal_common(struct bnx2x *bp)
4373 {
4374         int i;
4375
4376         if (!CHIP_IS_E1(bp)) {
4377
4378                 /* xstorm needs to know whether to add ovlan to packets or not;
4379                  * in switch-independent mode we write 0 here... */
4380                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4381                         bp->mf_mode);
4382                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4383                         bp->mf_mode);
4384                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4385                         bp->mf_mode);
4386                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4387                         bp->mf_mode);
4388         }
4389
4390         if (IS_MF_SI(bp))
4391                 /*
4392                  * In switch independent mode, the TSTORM needs to accept
4393                  * packets that failed classification, since approximate match
4394                  * mac addresses aren't written to NIG LLH
4395                  */
4396                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4397                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4398
4399         /* Zero this manually as its initialization is
4400            currently missing in the initTool */
4401         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4402                 REG_WR(bp, BAR_USTRORM_INTMEM +
4403                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4404         if (CHIP_IS_E2(bp)) {
4405                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4406                         CHIP_INT_MODE_IS_BC(bp) ?
4407                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4408         }
4409 }
4410
4411 static void bnx2x_init_internal_port(struct bnx2x *bp)
4412 {
4413         /* port */
4414         bnx2x_dcb_init_intmem_pfc(bp);
4415 }
4416
4417 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4418 {
4419         switch (load_code) {
4420         case FW_MSG_CODE_DRV_LOAD_COMMON:
4421         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4422                 bnx2x_init_internal_common(bp);
4423                 /* no break */
4424
4425         case FW_MSG_CODE_DRV_LOAD_PORT:
4426                 bnx2x_init_internal_port(bp);
4427                 /* no break */
4428
4429         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4430                 /* internal memory per function is
4431                    initialized inside bnx2x_pf_init */
4432                 break;
4433
4434         default:
4435                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4436                 break;
4437         }
4438 }
4439
4440 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4441 {
4442         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4443
4444         fp->state = BNX2X_FP_STATE_CLOSED;
4445
4446         fp->index = fp->cid = fp_idx;
4447         fp->cl_id = BP_L_ID(bp) + fp_idx;
4448         fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4449         fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4450         /* qZone id equals the FW (per path) client id */
4451         fp->cl_qzone_id  = fp->cl_id +
4452                            BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4453                                 ETH_MAX_RX_CLIENTS_E1H);
4454         /* init shortcut */
4455         fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4456                             USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4457                             USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4458         /* Setup SB indices */
4459         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4460         fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4461
4462         DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
4463                                    "cl_id %d  fw_sb %d  igu_sb %d\n",
4464                    fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4465                    fp->igu_sb_id);
4466         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4467                       fp->fw_sb_id, fp->igu_sb_id);
4468
4469         bnx2x_update_fpsb_idx(fp);
4470 }
4471
4472 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4473 {
4474         int i;
4475
4476         for_each_eth_queue(bp, i)
4477                 bnx2x_init_fp_sb(bp, i);
4478 #ifdef BCM_CNIC
4479         if (!NO_FCOE(bp))
4480                 bnx2x_init_fcoe_fp(bp);
4481
4482         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4483                       BNX2X_VF_ID_INVALID, false,
4484                       CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4485
4486 #endif
4487
4488         /* ensure status block indices were read */
4489         rmb();
4490
4491         bnx2x_init_def_sb(bp);
4492         bnx2x_update_dsb_idx(bp);
4493         bnx2x_init_rx_rings(bp);
4494         bnx2x_init_tx_rings(bp);
4495         bnx2x_init_sp_ring(bp);
4496         bnx2x_init_eq_ring(bp);
4497         bnx2x_init_internal(bp, load_code);
4498         bnx2x_pf_init(bp);
4499         bnx2x_init_ind_table(bp);
4500         bnx2x_stats_init(bp);
4501
4502         /* At this point, we are ready for interrupts */
4503         atomic_set(&bp->intr_sem, 0);
4504
4505         /* flush all before enabling interrupts */
4506         mb();
4507         mmiowb();
4508
4509         bnx2x_int_enable(bp);
4510
4511         /* Check for SPIO5 */
4512         bnx2x_attn_int_deasserted0(bp,
4513                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4514                                    AEU_INPUTS_ATTN_BITS_SPIO5);
4515 }
4516
4517 /* end of nic init */
4518
4519 /*
4520  * gzip service functions
4521  */
4522
4523 static int bnx2x_gunzip_init(struct bnx2x *bp)
4524 {
4525         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4526                                             &bp->gunzip_mapping, GFP_KERNEL);
4527         if (bp->gunzip_buf  == NULL)
4528                 goto gunzip_nomem1;
4529
4530         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4531         if (bp->strm  == NULL)
4532                 goto gunzip_nomem2;
4533
4534         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4535                                       GFP_KERNEL);
4536         if (bp->strm->workspace == NULL)
4537                 goto gunzip_nomem3;
4538
4539         return 0;
4540
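     /* error unwinding: release resources in reverse order of allocation */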
4541 gunzip_nomem3:
4542         kfree(bp->strm);
4543         bp->strm = NULL;
4544
4545 gunzip_nomem2:
4546         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4547                           bp->gunzip_mapping);
4548         bp->gunzip_buf = NULL;
4549
4550 gunzip_nomem1:
4551         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4552                " decompression\n");
4553         return -ENOMEM;
4554 }
4555
4556 static void bnx2x_gunzip_end(struct bnx2x *bp)
4557 {
4558         kfree(bp->strm->workspace);
4559         kfree(bp->strm);
4560         bp->strm = NULL;
4561
4562         if (bp->gunzip_buf) {
4563                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4564                                   bp->gunzip_mapping);
4565                 bp->gunzip_buf = NULL;
4566         }
4567 }
4568
4569 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4570 {
4571         int n, rc;
4572
4573         /* check gzip header */
4574         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4575                 BNX2X_ERR("Bad gzip header\n");
4576                 return -EINVAL;
4577         }
4578
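             /* The fixed part of the gzip header is 10 bytes; if the FNAME
              * flag is set, a NUL-terminated original file name follows and
              * must be skipped as well.
              */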
4579         n = 10;
4580
4581 #define FNAME                           0x8
4582
4583         if (zbuf[3] & FNAME)
4584                 while ((zbuf[n++] != 0) && (n < len));
4585
4586         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4587         bp->strm->avail_in = len - n;
4588         bp->strm->next_out = bp->gunzip_buf;
4589         bp->strm->avail_out = FW_BUF_SIZE;
4590
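             /* negative windowBits tells zlib to expect a raw deflate
              * stream, since the gzip header was already skipped above
              */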
4591         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4592         if (rc != Z_OK)
4593                 return rc;
4594
4595         rc = zlib_inflate(bp->strm, Z_FINISH);
4596         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4597                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4598                            bp->strm->msg);
4599
4600         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4601         if (bp->gunzip_outlen & 0x3)
4602                 netdev_err(bp->dev, "Firmware decompression error:"
4603                                     " gunzip_outlen (%d) not aligned\n",
4604                                 bp->gunzip_outlen);
4605         bp->gunzip_outlen >>= 2;
4606
4607         zlib_inflateEnd(bp->strm);
4608
4609         if (rc == Z_STREAM_END)
4610                 return 0;
4611
4612         return rc;
4613 }
4614
4615 /* nic load/unload */
4616
4617 /*
4618  * General service functions
4619  */
4620
4621 /* send a NIG loopback debug packet */
4622 static void bnx2x_lb_pckt(struct bnx2x *bp)
4623 {
4624         u32 wb_write[3];
4625
4626         /* Ethernet source and destination addresses */
4627         wb_write[0] = 0x55555555;
4628         wb_write[1] = 0x55555555;
4629         wb_write[2] = 0x20;             /* SOP */
4630         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4631
4632         /* NON-IP protocol */
4633         wb_write[0] = 0x09000000;
4634         wb_write[1] = 0x55555555;
4635         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4636         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4637 }
4638
4639 /* some of the internal memories
4640  * are not directly readable from the driver;
4641  * to test them we send debug packets
4642  */
4643 static int bnx2x_int_mem_test(struct bnx2x *bp)
4644 {
4645         int factor;
4646         int count, i;
4647         u32 val = 0;
4648
4649         if (CHIP_REV_IS_FPGA(bp))
4650                 factor = 120;
4651         else if (CHIP_REV_IS_EMUL(bp))
4652                 factor = 200;
4653         else
4654                 factor = 1;
4655
4656         /* Disable inputs of parser neighbor blocks */
4657         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4658         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4659         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4660         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4661
4662         /*  Write 0 to parser credits for CFC search request */
4663         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4664
4665         /* send Ethernet packet */
4666         bnx2x_lb_pckt(bp);
4667
4668         /* TODO: do we need to reset the NIG statistics? */
4669         /* Wait until NIG register shows 1 packet of size 0x10 */
4670         count = 1000 * factor;
4671         while (count) {
4672
4673                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4674                 val = *bnx2x_sp(bp, wb_data[0]);
4675                 if (val == 0x10)
4676                         break;
4677
4678                 msleep(10);
4679                 count--;
4680         }
4681         if (val != 0x10) {
4682                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4683                 return -1;
4684         }
4685
4686         /* Wait until PRS register shows 1 packet */
4687         count = 1000 * factor;
4688         while (count) {
4689                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4690                 if (val == 1)
4691                         break;
4692
4693                 msleep(10);
4694                 count--;
4695         }
4696         if (val != 0x1) {
4697                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4698                 return -2;
4699         }
4700
4701         /* Reset and init BRB, PRS */
4702         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4703         msleep(50);
4704         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4705         msleep(50);
4706         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4707         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4708
4709         DP(NETIF_MSG_HW, "part2\n");
4710
4711         /* Disable inputs of parser neighbor blocks */
4712         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4713         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4714         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4715         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4716
4717         /* Write 0 to parser credits for CFC search request */
4718         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4719
4720         /* send 10 Ethernet packets */
4721         for (i = 0; i < 10; i++)
4722                 bnx2x_lb_pckt(bp);
4723
4724         /* Wait until NIG register shows 10 + 1
4725            packets of size 11*0x10 = 0xb0 */
4726         count = 1000 * factor;
4727         while (count) {
4728
4729                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4730                 val = *bnx2x_sp(bp, wb_data[0]);
4731                 if (val == 0xb0)
4732                         break;
4733
4734                 msleep(10);
4735                 count--;
4736         }
4737         if (val != 0xb0) {
4738                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4739                 return -3;
4740         }
4741
4742         /* Wait until PRS register shows 2 packets */
4743         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4744         if (val != 2)
4745                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4746
4747         /* Write 1 to parser credits for CFC search request */
4748         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4749
4750         /* Wait until PRS register shows 3 packets */
4751         msleep(10 * factor);
4753         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4754         if (val != 3)
4755                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4756
4757         /* clear NIG EOP FIFO */
4758         for (i = 0; i < 11; i++)
4759                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4760         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4761         if (val != 1) {
4762                 BNX2X_ERR("clear of NIG failed\n");
4763                 return -4;
4764         }
4765
4766         /* Reset and init BRB, PRS, NIG */
4767         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4768         msleep(50);
4769         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4770         msleep(50);
4771         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4772         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4773 #ifndef BCM_CNIC
4774         /* set NIC mode */
4775         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4776 #endif
4777
4778         /* Enable inputs of parser neighbor blocks */
4779         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4780         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4781         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4782         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4783
4784         DP(NETIF_MSG_HW, "done\n");
4785
4786         return 0; /* OK */
4787 }
4788
4789 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4790 {
4791         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4792         if (CHIP_IS_E2(bp))
4793                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4794         else
4795                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4796         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4797         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4798         /*
4799          * mask read length error interrupts in brb for parser
4800          * (parsing unit and 'checksum and crc' unit)
4801          * these errors are legal (PU reads fixed length and CAC can cause
4802          * read length error on truncated packets)
4803          */
4804         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4805         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4806         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4807         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4808         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4809         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4810 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4811 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4812         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4813         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4814         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4815 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4816 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4817         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4818         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4819         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4820         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4821 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4822 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4823
4824         if (CHIP_REV_IS_FPGA(bp))
4825                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4826         else if (CHIP_IS_E2(bp))
4827                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4828                            (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4829                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4830                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4831                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4832                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4833         else
4834                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4835         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4836         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4837         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4838 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4839 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4840         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4841         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4842 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4843         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
4844 }
4845
4846 static void bnx2x_reset_common(struct bnx2x *bp)
4847 {
4848         /* reset_common */
4849         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4850                0xd3ffff7f);
4851         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4852 }
4853
4854 static void bnx2x_init_pxp(struct bnx2x *bp)
4855 {
4856         u16 devctl;
4857         int r_order, w_order;
4858
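             /* The write order comes from the max payload size (DEVCTL bits
              * 7:5) and the read order from the max read request size (bits
              * 14:12), unless bp->mrrs forces a specific read order.
              */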
4859         pci_read_config_word(bp->pdev,
4860                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4861         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4862         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4863         if (bp->mrrs == -1)
4864                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4865         else {
4866                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4867                 r_order = bp->mrrs;
4868         }
4869
4870         bnx2x_init_pxp_arb(bp, r_order, w_order);
4871 }
4872
4873 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4874 {
4875         int is_required;
4876         u32 val;
4877         int port;
4878
4879         if (BP_NOMCP(bp))
4880                 return;
4881
4882         is_required = 0;
4883         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4884               SHARED_HW_CFG_FAN_FAILURE_MASK;
4885
4886         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4887                 is_required = 1;
4888
4889         /*
4890          * The fan failure mechanism is usually related to the PHY type since
4891          * the power consumption of the board is affected by the PHY. Currently,
4892          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4893          */
4894         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4895                 for (port = PORT_0; port < PORT_MAX; port++) {
4896                         is_required |=
4897                                 bnx2x_fan_failure_det_req(
4898                                         bp,
4899                                         bp->common.shmem_base,
4900                                         bp->common.shmem2_base,
4901                                         port);
4902                 }
4903
4904         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4905
4906         if (is_required == 0)
4907                 return;
4908
4909         /* Fan failure is indicated by SPIO 5 */
4910         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4911                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
4912
4913         /* set to active low mode */
4914         val = REG_RD(bp, MISC_REG_SPIO_INT);
4915         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4916                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4917         REG_WR(bp, MISC_REG_SPIO_INT, val);
4918
4919         /* enable interrupt to signal the IGU */
4920         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4921         val |= (1 << MISC_REGISTERS_SPIO_5);
4922         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4923 }
4924
4925 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4926 {
4927         u32 offset = 0;
4928
4929         if (CHIP_IS_E1(bp))
4930                 return;
4931         if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4932                 return;
4933
4934         switch (BP_ABS_FUNC(bp)) {
4935         case 0:
4936                 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4937                 break;
4938         case 1:
4939                 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4940                 break;
4941         case 2:
4942                 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4943                 break;
4944         case 3:
4945                 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4946                 break;
4947         case 4:
4948                 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4949                 break;
4950         case 5:
4951                 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4952                 break;
4953         case 6:
4954                 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4955                 break;
4956         case 7:
4957                 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4958                 break;
4959         default:
4960                 return;
4961         }
4962
4963         REG_WR(bp, offset, pretend_func_num);
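             /* read back to make sure the pretend value has reached the
              * chip before anything relies on it
              */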
4964         REG_RD(bp, offset);
4965         DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4966 }
4967
4968 static void bnx2x_pf_disable(struct bnx2x *bp)
4969 {
4970         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4971         val &= ~IGU_PF_CONF_FUNC_EN;
4972
4973         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4974         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4975         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4976 }
4977
4978 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4979 {
4980         u32 val, i;
4981
4982         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
4983
4984         bnx2x_reset_common(bp);
4985         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4986         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4987
4988         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4989         if (!CHIP_IS_E1(bp))
4990                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4991
4992         if (CHIP_IS_E2(bp)) {
4993                 u8 fid;
4994
4995                 /*
4996                  * In both 4-port and 2-port mode we need to turn off
4997                  * master-enable for everyone and then turn it back on for
4998                  * ourselves. So, regardless of multi-function mode, we
4999                  * always disable it for all functions on the given path,
5000                  * which means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
5001                  */
5002                 for (fid = BP_PATH(bp); fid  < E2_FUNC_MAX*2; fid += 2) {
5003                         if (fid == BP_ABS_FUNC(bp)) {
5004                                 REG_WR(bp,
5005                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
5006                                     1);
5007                                 continue;
5008                         }
5009
5010                         bnx2x_pretend_func(bp, fid);
5011                         /* clear pf enable */
5012                         bnx2x_pf_disable(bp);
5013                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5014                 }
5015         }
5016
5017         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5018         if (CHIP_IS_E1(bp)) {
5019                 /* enable HW interrupt from PXP on USDM overflow,
5020                    bit 16 on INT_MASK_0 */
5021                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5022         }
5023
5024         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5025         bnx2x_init_pxp(bp);
5026
5027 #ifdef __BIG_ENDIAN
5028         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5029         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5030         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5031         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5032         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5033         /* make sure this value is 0 */
5034         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5035
5036 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5037         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5038         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5039         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5040         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5041 #endif
5042
5043         bnx2x_ilt_init_page_size(bp, INITOP_SET);
5044
5045         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5046                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5047
5048         /* let the HW do its magic ... */
5049         msleep(100);
5050         /* finish PXP init */
5051         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5052         if (val != 1) {
5053                 BNX2X_ERR("PXP2 CFG failed\n");
5054                 return -EBUSY;
5055         }
5056         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5057         if (val != 1) {
5058                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5059                 return -EBUSY;
5060         }
5061
5062         /* Timers bug workaround, E2 only. We need to set the entire ILT to
5063          * have entries with value "0" and valid bit on.
5064          * This needs to be done by the first PF that is loaded in a path
5065          * (i.e. common phase)
5066          */
5067         if (CHIP_IS_E2(bp)) {
5068                 struct ilt_client_info ilt_cli;
5069                 struct bnx2x_ilt ilt;
5070                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5071                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5072
5073                 /* initialize dummy TM client */
5074                 ilt_cli.start = 0;
5075                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5076                 ilt_cli.client_num = ILT_CLIENT_TM;
5077
5078                 /* Step 1: set zeroes to all ilt page entries with valid bit on
5079                  * Step 2: set the timers first/last ilt entry to point
5080                  * to the entire range to prevent ILT range error for 3rd/4th
5081                  * vnic (this code assumes existence of the vnic)
5082                  *
5083                  * both steps are performed by a call to bnx2x_ilt_client_init_op()
5084                  * with a dummy TM client
5085                  *
5086                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5087                  * and its counterpart are split registers
5088                  */
5089                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5090                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5091                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5092
5093                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5094                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5095                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5096         }
5097
5098
5099         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5100         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5101
5102         if (CHIP_IS_E2(bp)) {
5103                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5104                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5105                 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5106
5107                 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5108
5109                 /* let the HW do its magic ... */
5110                 do {
5111                         msleep(200);
5112                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5113                 } while (factor-- && (val != 1));
5114
5115                 if (val != 1) {
5116                         BNX2X_ERR("ATC_INIT failed\n");
5117                         return -EBUSY;
5118                 }
5119         }
5120
5121         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5122
5123         /* clean the DMAE memory */
5124         bp->dmae_ready = 1;
5125         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5126
5127         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5128         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5129         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5130         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5131
5132         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5133         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5134         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5135         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5136
5137         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5138
5139         if (CHIP_MODE_IS_4_PORT(bp))
5140                 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5141
5142         /* QM queues pointers table */
5143         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5144
5145         /* soft reset pulse */
5146         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5147         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5148
5149 #ifdef BCM_CNIC
5150         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5151 #endif
5152
5153         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5154         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5155
5156         if (!CHIP_REV_IS_SLOW(bp)) {
5157                 /* enable hw interrupt from doorbell Q */
5158                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5159         }
5160
5161         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5162         if (CHIP_MODE_IS_4_PORT(bp)) {
5163                 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5164                 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5165         }
5166
5167         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5168         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5169 #ifndef BCM_CNIC
5170         /* set NIC mode */
5171         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5172 #endif
5173         if (!CHIP_IS_E1(bp))
5174                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5175
5176         if (CHIP_IS_E2(bp)) {
5177                 /* Bit-map indicating which L2 hdrs may appear after the
5178                    basic Ethernet header */
5179                 int has_ovlan = IS_MF_SD(bp);
5180                 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5181                 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5182         }
5183
5184         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5185         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5186         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5187         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5188
5189         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5190         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5191         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5192         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5193
5194         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5195         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5196         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5197         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5198
5199         if (CHIP_MODE_IS_4_PORT(bp))
5200                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5201
5202         /* sync semi rtc */
5203         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5204                0x80000000);
5205         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5206                0x80000000);
5207
5208         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5209         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5210         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5211
5212         if (CHIP_IS_E2(bp)) {
5213                 int has_ovlan = IS_MF_SD(bp);
5214                 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5215                 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5216         }
5217
5218         REG_WR(bp, SRC_REG_SOFT_RST, 1);
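             /* while the searcher is held in soft reset, seed its RSS key
              * registers with random values
              */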
5219         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5220                 REG_WR(bp, i, random32());
5221
5222         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5223 #ifdef BCM_CNIC
5224         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5225         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5226         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5227         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5228         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5229         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5230         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5231         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5232         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5233         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5234 #endif
5235         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5236
5237         if (sizeof(union cdu_context) != 1024)
5238                 /* we currently assume that a context is 1024 bytes */
5239                 dev_alert(&bp->pdev->dev, "please adjust the size "
5240                                           "of cdu_context(%ld)\n",
5241                          (long)sizeof(union cdu_context));
5242
5243         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5244         val = (4 << 24) + (0 << 12) + 1024;
5245         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5246
5247         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5248         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5249         /* enable context validation interrupt from CFC */
5250         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5251
5252         /* set the thresholds to prevent CFC/CDU race */
5253         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5254
5255         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5256
5257         if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5258                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5259
5260         bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5261         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5262
5263         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5264         /* Reset PCIE errors for debug */
5265         REG_WR(bp, 0x2814, 0xffffffff);
5266         REG_WR(bp, 0x3820, 0xffffffff);
5267
5268         if (CHIP_IS_E2(bp)) {
5269                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5270                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5271                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5272                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5273                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5274                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5275                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5276                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5277                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5278                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5279                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5280         }
5281
5282         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5283         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5284         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5285         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5286
5287         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5288         if (!CHIP_IS_E1(bp)) {
5289                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5290                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5291         }
5292         if (CHIP_IS_E2(bp)) {
5293                 /* Bit-map indicating which L2 hdrs may appear after the
5294                    basic Ethernet header */
5295                 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5296         }
5297
5298         if (CHIP_REV_IS_SLOW(bp))
5299                 msleep(200);
5300
5301         /* finish CFC init */
5302         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5303         if (val != 1) {
5304                 BNX2X_ERR("CFC LL_INIT failed\n");
5305                 return -EBUSY;
5306         }
5307         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5308         if (val != 1) {
5309                 BNX2X_ERR("CFC AC_INIT failed\n");
5310                 return -EBUSY;
5311         }
5312         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5313         if (val != 1) {
5314                 BNX2X_ERR("CFC CAM_INIT failed\n");
5315                 return -EBUSY;
5316         }
5317         REG_WR(bp, CFC_REG_DEBUG0, 0);
5318
5319         if (CHIP_IS_E1(bp)) {
5320                 /* read NIG statistic
5321                    to see if this is our first bring-up since power-up */
5322                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5323                 val = *bnx2x_sp(bp, wb_data[0]);
5324
5325                 /* do internal memory self test */
5326                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5327                         BNX2X_ERR("internal mem self test failed\n");
5328                         return -EBUSY;
5329                 }
5330         }
5331
5332         bnx2x_setup_fan_failure_detection(bp);
5333
5334         /* clear PXP2 attentions */
5335         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5336
5337         bnx2x_enable_blocks_attention(bp);
5338         if (CHIP_PARITY_ENABLED(bp))
5339                 bnx2x_enable_blocks_parity(bp);
5340
5341         if (!BP_NOMCP(bp)) {
5342                 /* In E2 2-PORT mode, the same ext phy is used for both paths */
5343                 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5344                     CHIP_IS_E1x(bp)) {
5345                         u32 shmem_base[2], shmem2_base[2];
5346                         shmem_base[0] =  bp->common.shmem_base;
5347                         shmem2_base[0] = bp->common.shmem2_base;
5348                         if (CHIP_IS_E2(bp)) {
5349                                 shmem_base[1] =
5350                                         SHMEM2_RD(bp, other_shmem_base_addr);
5351                                 shmem2_base[1] =
5352                                         SHMEM2_RD(bp, other_shmem2_base_addr);
5353                         }
5354                         bnx2x_acquire_phy_lock(bp);
5355                         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5356                                               bp->common.chip_id);
5357                         bnx2x_release_phy_lock(bp);
5358                 }
5359         } else
5360                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5361
5362         return 0;
5363 }
5364
5365 static int bnx2x_init_hw_port(struct bnx2x *bp)
5366 {
5367         int port = BP_PORT(bp);
5368         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5369         u32 low, high;
5370         u32 val;
5371
5372         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
5373
5374         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5375
5376         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5377         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5378
5379         /* Timers bug workaround: the common phase disables the pf_master
5380          * bit in pglue; we need to enable it here before any DMAE accesses
5381          * are attempted, so we manually add the enable-master in the port
5382          * phase (it also happens in the function phase)
5383          */
5384         if (CHIP_IS_E2(bp))
5385                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5386
5387         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5388         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5389         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5390         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5391
5392         /* QM cid (connection) count */
5393         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5394
5395 #ifdef BCM_CNIC
5396         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5397         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5398         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5399 #endif
5400
5401         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5402
5403         if (CHIP_MODE_IS_4_PORT(bp))
5404                 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5405
5406         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5407                 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
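                     /* The pause thresholds below are programmed in units of
                      * 256-byte BRB blocks, as the inline arithmetic comments
                      * indicate (e.g. 14*1024/256 = 56).
                      */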
5408                 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5409                         /* no pause for emulation and FPGA */
5410                         low = 0;
5411                         high = 513;
5412                 } else {
5413                         if (IS_MF(bp))
5414                                 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5415                         else if (bp->dev->mtu > 4096) {
5416                                 if (bp->flags & ONE_PORT_FLAG)
5417                                         low = 160;
5418                                 else {
5419                                         val = bp->dev->mtu;
5420                                         /* (24*1024 + val*4)/256 */
5421                                         low = 96 + (val/64) +
5422                                                         ((val % 64) ? 1 : 0);
5423                                 }
5424                         } else
5425                                 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5426                         high = low + 56;        /* 14*1024/256 */
5427                 }
5428                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5429                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5430         }
5431
5432         if (CHIP_MODE_IS_4_PORT(bp)) {
5433                 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5434                 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5435                 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5436                                           BRB1_REG_MAC_GUARANTIED_0), 40);
5437         }
5438
5439         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5440
5441         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5442         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5443         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5444         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5445
5446         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5447         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5448         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5449         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5450         if (CHIP_MODE_IS_4_PORT(bp))
5451                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5452
5453         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5454         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5455
5456         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5457
5458         if (!CHIP_IS_E2(bp)) {
5459                 /* configure PBF to work without PAUSE, mtu 9000 */
5460                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5461
5462                 /* update threshold */
5463                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5464                 /* update init credit */
5465                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5466
5467                 /* probe changes */
5468                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5469                 udelay(50);
5470                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5471         }
5472
5473 #ifdef BCM_CNIC
5474         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5475 #endif
5476         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5477         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5478
5479         if (CHIP_IS_E1(bp)) {
5480                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5481                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5482         }
5483         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5484
5485         bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5486
5487         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5488         /* init aeu_mask_attn_func_0/1:
5489          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5490          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5491          *             bits 4-7 are used for "per vn group attention" */
5492         val = IS_MF(bp) ? 0xF7 : 0x7;
5493         /* Enable DCBX attention for all but E1 */
5494         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5495         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
5496
5497         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5498         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5499         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5500         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5501         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5502
5503         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5504
5505         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5506
5507         if (!CHIP_IS_E1(bp)) {
5508                 /* 0x2 disable mf_ov, 0x1 enable */
5509                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5510                        (IS_MF_SD(bp) ? 0x1 : 0x2));
5511
5512                 if (CHIP_IS_E2(bp)) {
5513                         val = 0;
5514                         switch (bp->mf_mode) {
5515                         case MULTI_FUNCTION_SD:
5516                                 val = 1;
5517                                 break;
5518                         case MULTI_FUNCTION_SI:
5519                                 val = 2;
5520                                 break;
5521                         }
5522
5523                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5524                                                   NIG_REG_LLH0_CLS_TYPE), val);
5525                 }
5526                 {
5527                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5528                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5529                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5530                 }
5531         }
5532
5533         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5534         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5535         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5536                                       bp->common.shmem2_base, port)) {
5537                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5538                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5539                 val = REG_RD(bp, reg_addr);
5540                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5541                 REG_WR(bp, reg_addr, val);
5542         }
5543         bnx2x__link_reset(bp);
5544
5545         return 0;
5546 }
5547
5548 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5549 {
5550         int reg;
5551
5552         if (CHIP_IS_E1(bp))
5553                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5554         else
5555                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5556
5557         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5558 }
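/* Each ILT entry is written as a wide-bus (64-bit) pair, hence the index*8
 * stride above; ONCHIP_ADDR1()/ONCHIP_ADDR2() split the page's DMA address
 * into the two 32-bit halves that bnx2x_wb_wr() programs.
 */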
5559
5560 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5561 {
5562         bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5563 }
5564
5565 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5566 {
5567         u32 i, base = FUNC_ILT_BASE(func);
5568         for (i = base; i < base + ILT_PER_FUNC; i++)
5569                 bnx2x_ilt_wr(bp, i, 0);
5570 }
5571
5572 static int bnx2x_init_hw_func(struct bnx2x *bp)
5573 {
5574         int port = BP_PORT(bp);
5575         int func = BP_FUNC(bp);
5576         struct bnx2x_ilt *ilt = BP_ILT(bp);
5577         u16 cdu_ilt_start;
5578         u32 addr, val;
5579         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5580         int i, main_mem_width;
5581
5582         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
5583
5584         /* set MSI reconfigure capability */
5585         if (bp->common.int_block == INT_BLOCK_HC) {
5586                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5587                 val = REG_RD(bp, addr);
5588                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5589                 REG_WR(bp, addr, val);
5590         }
5591
5592         ilt = BP_ILT(bp);
5593         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5594
5595         for (i = 0; i < L2_ILT_LINES(bp); i++) {
5596                 ilt->lines[cdu_ilt_start + i].page =
5597                         bp->context.vcxt + (ILT_PAGE_CIDS * i);
5598                 ilt->lines[cdu_ilt_start + i].page_mapping =
5599                         bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5600                 /* cdu ilt pages are allocated manually so there's no
5601                  * need to set the size */
5602         }
5603         bnx2x_ilt_init_op(bp, INITOP_SET);
5604
5605 #ifdef BCM_CNIC
5606         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5607
5608         /* T1 hash bits value determines the T1 number of entries */
5609         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5610 #endif
5611
5612 #ifndef BCM_CNIC
5613         /* set NIC mode */
5614         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5615 #endif  /* BCM_CNIC */
5616
5617         if (CHIP_IS_E2(bp)) {
5618                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5619
5620                 /* Turn on a single ISR mode in IGU if driver is going to use
5621                  * INT#x or MSI
5622                  */
5623                 if (!(bp->flags & USING_MSIX_FLAG))
5624                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5625                 /*
5626                  * Timers workaround bug: function init part.
5627                  * Need to wait 20msec after initializing ILT to make
5628                  * sure there are no requests in one of the PXP internal
5629                  * queues with "old" ILT addresses.
5630                  */
5631                 msleep(20);
5632                 /*
5633                  * Master enable - Due to WB DMAE writes performed before this
5634                  * register is re-initialized as part of the regular function
5635                  * init
5636                  */
5637                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5638                 /* Enable the function in IGU */
5639                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5640         }
5641
5642         bp->dmae_ready = 1;
5643
5644         bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5645
5646         if (CHIP_IS_E2(bp))
5647                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5648
5649         bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5650         bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5651         bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5652         bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5653         bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5654         bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5655         bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5656         bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5657         bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5658
5659         if (CHIP_IS_E2(bp)) {
5660                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5661                                                                 BP_PATH(bp));
5662                 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5663                                                                 BP_PATH(bp));
5664         }
5665
5666         if (CHIP_MODE_IS_4_PORT(bp))
5667                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5668
5669         if (CHIP_IS_E2(bp))
5670                 REG_WR(bp, QM_REG_PF_EN, 1);
5671
5672         bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5673
5674         if (CHIP_MODE_IS_4_PORT(bp))
5675                 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5676
5677         bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5678         bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5679         bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5680         bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5681         bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5682         bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5683         bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5684         bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5685         bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5686         bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5687         bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5688         if (CHIP_IS_E2(bp))
5689                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5690
5691         bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5692
5693         bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5694
5695         if (CHIP_IS_E2(bp))
5696                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5697
5698         if (IS_MF(bp)) {
5699                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5700                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5701         }
5702
5703         bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5704
5705         /* HC init per function */
5706         if (bp->common.int_block == INT_BLOCK_HC) {
5707                 if (CHIP_IS_E1H(bp)) {
5708                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5709
5710                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5711                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5712                 }
5713                 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5714
5715         } else {
5716                 int num_segs, sb_idx, prod_offset;
5717
5718                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5719
5720                 if (CHIP_IS_E2(bp)) {
5721                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5722                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5723                 }
5724
5725                 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5726
5727                 if (CHIP_IS_E2(bp)) {
5728                         int dsb_idx = 0;
5729                         /**
5730                          * Producer memory:
5731                          * E2 mode: address 0-135 match to the mapping memory;
5732                          * 136 - PF0 default prod; 137 - PF1 default prod;
5733                          * 138 - PF2 default prod; 139 - PF3 default prod;
5734                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
5735                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
5736                          * 144-147 reserved.
5737                          *
5738                          * E1.5 mode - in backward-compatible mode,
5739                          * for non-default SBs each even line in the memory
5740                          * holds the U producer and each odd line holds
5741                          * the C producer. The first 128 producers are for
5742                          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5743                          * producers are for the DSB for each PF.
5744                          * Each PF has five segments: (the order inside each
5745                          * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5746                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5747                          * 144-147 attn prods;
5748                          */
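                        /* Tying the map above to the loop below: in BC mode
                         * each NDSB uses two producer lines (even = U prod,
                         * odd = C prod), so SB n zeroes lines 2n and 2n+1;
                         * in normal E2 mode the mapping is apparently one
                         * line per SB.
                         */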
5749                         /* non-default-status-blocks */
5750                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5751                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5752                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5753                                 prod_offset = (bp->igu_base_sb + sb_idx) *
5754                                         num_segs;
5755
5756                                 for (i = 0; i < num_segs; i++) {
5757                                         addr = IGU_REG_PROD_CONS_MEMORY +
5758                                                         (prod_offset + i) * 4;
5759                                         REG_WR(bp, addr, 0);
5760                                 }
5761                                 /* send consumer update with value 0 */
5762                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5763                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5764                                 bnx2x_igu_clear_sb(bp,
5765                                                    bp->igu_base_sb + sb_idx);
5766                         }
5767
5768                         /* default-status-blocks */
5769                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5770                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5771
5772                         if (CHIP_MODE_IS_4_PORT(bp))
5773                                 dsb_idx = BP_FUNC(bp);
5774                         else
5775                                 dsb_idx = BP_E1HVN(bp);
5776
5777                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5778                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
5779                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
5780
5781                         for (i = 0; i < (num_segs * E1HVN_MAX);
5782                              i += E1HVN_MAX) {
5783                                 addr = IGU_REG_PROD_CONS_MEMORY +
5784                                                         (prod_offset + i)*4;
5785                                 REG_WR(bp, addr, 0);
5786                         }
5787                         /* send consumer update with 0 */
5788                         if (CHIP_INT_MODE_IS_BC(bp)) {
5789                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5790                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5791                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5792                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
5793                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5794                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
5795                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5796                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
5797                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5798                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5799                         } else {
5800                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5801                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5802                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5803                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5804                         }
5805                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5806
5807                         /* !!! these should become driver const once
5808                            rf-tool supports split-68 const */
5809                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5810                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5811                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5812                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5813                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5814                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5815                 }
5816         }
5817
5818         /* Reset PCIE errors for debug */
5819         REG_WR(bp, 0x2114, 0xffffffff);
5820         REG_WR(bp, 0x2120, 0xffffffff);
5821
5822         bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5823         bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5824         bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5825         bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5826         bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5827         bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5828
5829         if (CHIP_IS_E1x(bp)) {
5830                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5831                 main_mem_base = HC_REG_MAIN_MEMORY +
5832                                 BP_PORT(bp) * (main_mem_size * 4);
5833                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5834                 main_mem_width = 8;
5835
5836                 val = REG_RD(bp, main_mem_prty_clr);
5837                 if (val)
5838                         DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5839                                           "block during "
5840                                           "function init (0x%x)!\n", val);
5841
5842                 /* Clear "false" parity errors in MSI-X table */
5843                 for (i = main_mem_base;
5844                      i < main_mem_base + main_mem_size * 4;
5845                      i += main_mem_width) {
5846                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
5847                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5848                                          i, main_mem_width / 4);
5849                 }
5850                 /* Clear HC parity attention */
5851                 REG_RD(bp, main_mem_prty_clr);
5852         }
5853
5854         bnx2x_phy_probe(&bp->link_params);
5855
5856         return 0;
5857 }
5858
5859 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5860 {
5861         int rc = 0;
5862
5863         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5864            BP_ABS_FUNC(bp), load_code);
5865
5866         bp->dmae_ready = 0;
5867         spin_lock_init(&bp->dmae_lock);
5868         rc = bnx2x_gunzip_init(bp);
5869         if (rc)
5870                 return rc;
5871
5872         switch (load_code) {
5873         case FW_MSG_CODE_DRV_LOAD_COMMON:
5874         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5875                 rc = bnx2x_init_hw_common(bp, load_code);
5876                 if (rc)
5877                         goto init_hw_err;
5878                 /* no break */
5879
5880         case FW_MSG_CODE_DRV_LOAD_PORT:
5881                 rc = bnx2x_init_hw_port(bp);
5882                 if (rc)
5883                         goto init_hw_err;
5884                 /* no break */
5885
5886         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5887                 rc = bnx2x_init_hw_func(bp);
5888                 if (rc)
5889                         goto init_hw_err;
5890                 break;
5891
5892         default:
5893                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5894                 break;
5895         }
5896
5897         if (!BP_NOMCP(bp)) {
5898                 int mb_idx = BP_FW_MB_IDX(bp);
5899
5900                 bp->fw_drv_pulse_wr_seq =
5901                                 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5902                                  DRV_PULSE_SEQ_MASK);
5903                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5904         }
5905
5906 init_hw_err:
5907         bnx2x_gunzip_end(bp);
5908
5909         return rc;
5910 }
5911
5912 void bnx2x_free_mem(struct bnx2x *bp)
5913 {
5914
5915 #define BNX2X_PCI_FREE(x, y, size) \
5916         do { \
5917                 if (x) { \
5918                         dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5919                         x = NULL; \
5920                         y = 0; \
5921                 } \
5922         } while (0)
5923
5924 #define BNX2X_FREE(x) \
5925         do { \
5926                 if (x) { \
5927                         kfree((void *)x); \
5928                         x = NULL; \
5929                 } \
5930         } while (0)
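/* Both helpers are deliberately defensive: they skip NULL pointers, free
 * the memory, and then poison the pointer (NULL, plus a zeroed DMA handle
 * in the PCI case) so that a repeated bnx2x_free_mem() pass is harmless.
 */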
5931
5932         int i;
5933
5934         /* fastpath */
5935         /* Common */
5936         for_each_queue(bp, i) {
5937 #ifdef BCM_CNIC
5938                 /* FCoE client uses default status block */
5939                 if (IS_FCOE_IDX(i)) {
5940                         union host_hc_status_block *sb =
5941                                 &bnx2x_fp(bp, i, status_blk);
5942                         memset(sb, 0, sizeof(union host_hc_status_block));
5943                         bnx2x_fp(bp, i, status_blk_mapping) = 0;
5944                 } else {
5945 #endif
5946                 /* status blocks */
5947                 if (CHIP_IS_E2(bp))
5948                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5949                                        bnx2x_fp(bp, i, status_blk_mapping),
5950                                        sizeof(struct host_hc_status_block_e2));
5951                 else
5952                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5953                                        bnx2x_fp(bp, i, status_blk_mapping),
5954                                        sizeof(struct host_hc_status_block_e1x));
5955 #ifdef BCM_CNIC
5956                 }
5957 #endif
5958         }
5959         /* Rx */
5960         for_each_rx_queue(bp, i) {
5961
5962                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5963                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5964                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5965                                bnx2x_fp(bp, i, rx_desc_mapping),
5966                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5967
5968                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5969                                bnx2x_fp(bp, i, rx_comp_mapping),
5970                                sizeof(struct eth_fast_path_rx_cqe) *
5971                                NUM_RCQ_BD);
5972
5973                 /* SGE ring */
5974                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5975                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5976                                bnx2x_fp(bp, i, rx_sge_mapping),
5977                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5978         }
5979         /* Tx */
5980         for_each_tx_queue(bp, i) {
5981
5982                 /* fastpath tx rings: tx_buf tx_desc */
5983                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5984                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5985                                bnx2x_fp(bp, i, tx_desc_mapping),
5986                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5987         }
5988         /* end of fastpath */
5989
5990         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5991                        sizeof(struct host_sp_status_block));
5992
5993         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5994                        sizeof(struct bnx2x_slowpath));
5995
5996         BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5997                        bp->context.size);
5998
5999         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
6000
6001         BNX2X_FREE(bp->ilt->lines);
6002
6003 #ifdef BCM_CNIC
6004         if (CHIP_IS_E2(bp))
6005                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
6006                                sizeof(struct host_hc_status_block_e2));
6007         else
6008                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
6009                                sizeof(struct host_hc_status_block_e1x));
6010
6011         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
6012 #endif
6013
6014         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6015
6016         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6017                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
6018
6019 #undef BNX2X_PCI_FREE
6020 #undef BNX2X_FREE
6021 }
6022
6023 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6024 {
6025         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6026         if (CHIP_IS_E2(bp)) {
6027                 bnx2x_fp(bp, index, sb_index_values) =
6028                         (__le16 *)status_blk.e2_sb->sb.index_values;
6029                 bnx2x_fp(bp, index, sb_running_index) =
6030                         (__le16 *)status_blk.e2_sb->sb.running_index;
6031         } else {
6032                 bnx2x_fp(bp, index, sb_index_values) =
6033                         (__le16 *)status_blk.e1x_sb->sb.index_values;
6034                 bnx2x_fp(bp, index, sb_running_index) =
6035                         (__le16 *)status_blk.e1x_sb->sb.running_index;
6036         }
6037 }
6038
6039 int bnx2x_alloc_mem(struct bnx2x *bp)
6040 {
6041 #define BNX2X_PCI_ALLOC(x, y, size) \
6042         do { \
6043                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6044                 if (x == NULL) \
6045                         goto alloc_mem_err; \
6046                 memset(x, 0, size); \
6047         } while (0)
6048
6049 #define BNX2X_ALLOC(x, size) \
6050         do { \
6051                 x = kzalloc(size, GFP_KERNEL); \
6052                 if (x == NULL) \
6053                         goto alloc_mem_err; \
6054         } while (0)
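/* On any failure the macros jump to alloc_mem_err below, which unwinds via
 * bnx2x_free_mem(); because the free macros tolerate NULL pointers, a
 * partially completed allocation sequence is cleaned up safely.
 */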
6055
6056         int i;
6057
6058         /* fastpath */
6059         /* Common */
6060         for_each_queue(bp, i) {
6061                 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
6062                 bnx2x_fp(bp, i, bp) = bp;
6063                 /* status blocks */
6064 #ifdef BCM_CNIC
6065                 if (!IS_FCOE_IDX(i)) {
6066 #endif
6067                         if (CHIP_IS_E2(bp))
6068                                 BNX2X_PCI_ALLOC(sb->e2_sb,
6069                                     &bnx2x_fp(bp, i, status_blk_mapping),
6070                                     sizeof(struct host_hc_status_block_e2));
6071                         else
6072                                 BNX2X_PCI_ALLOC(sb->e1x_sb,
6073                                     &bnx2x_fp(bp, i, status_blk_mapping),
6074                                     sizeof(struct host_hc_status_block_e1x));
6075 #ifdef BCM_CNIC
6076                 }
6077 #endif
6078                 set_sb_shortcuts(bp, i);
6079         }
6080         /* Rx */
6081         for_each_queue(bp, i) {
6082
6083                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6084                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6085                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6086                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6087                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6088                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6089
6090                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6091                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6092                                 sizeof(struct eth_fast_path_rx_cqe) *
6093                                 NUM_RCQ_BD);
6094
6095                 /* SGE ring */
6096                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6097                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6098                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6099                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6100                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6101         }
6102         /* Tx */
6103         for_each_queue(bp, i) {
6104
6105                 /* fastpath tx rings: tx_buf tx_desc */
6106                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6107                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6108                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6109                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6110                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6111         }
6112         /* end of fastpath */
6113
6114 #ifdef BCM_CNIC
6115         if (CHIP_IS_E2(bp))
6116                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6117                                 sizeof(struct host_hc_status_block_e2));
6118         else
6119                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6120                                 sizeof(struct host_hc_status_block_e1x));
6121
6122         /* allocate searcher T2 table */
6123         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6124 #endif
6125
6126
6127         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6128                         sizeof(struct host_sp_status_block));
6129
6130         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6131                         sizeof(struct bnx2x_slowpath));
6132
6133         bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
6134
6135         BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6136                         bp->context.size);
6137
6138         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
6139
6140         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6141                 goto alloc_mem_err;
6142
6143         /* Slow path ring */
6144         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6145
6146         /* EQ */
6147         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6148                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
6149         return 0;
6150
6151 alloc_mem_err:
6152         bnx2x_free_mem(bp);
6153         return -ENOMEM;
6154
6155 #undef BNX2X_PCI_ALLOC
6156 #undef BNX2X_ALLOC
6157 }
6158
6159 /*
6160  * Init service functions
6161  */
6162 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6163                              int *state_p, int flags);
6164
6165 int bnx2x_func_start(struct bnx2x *bp)
6166 {
6167         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6168
6169         /* Wait for completion */
6170         return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6171                                  WAIT_RAMROD_COMMON);
6172 }
6173
6174 static int bnx2x_func_stop(struct bnx2x *bp)
6175 {
6176         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6177
6178         /* Wait for completion */
6179         return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6180                                       0, &(bp->state), WAIT_RAMROD_COMMON);
6181 }
6182
6183 /**
6184  * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6185  *
6186  * @param bp driver descriptor
6187  * @param set set or clear an entry (1 or 0)
6188  * @param mac pointer to a buffer containing a MAC
6189  * @param cl_bit_vec bit vector of clients to register a MAC for
6190  * @param cam_offset offset in a CAM to use
6191  * @param is_bcast is the set MAC a broadcast address (for E1 only)
6192  */
6193 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6194                                    u32 cl_bit_vec, u8 cam_offset,
6195                                    u8 is_bcast)
6196 {
6197         struct mac_configuration_cmd *config =
6198                 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6199         int ramrod_flags = WAIT_RAMROD_COMMON;
6200
6201         bp->set_mac_pending = 1;
6202
6203         config->hdr.length = 1;
6204         config->hdr.offset = cam_offset;
6205         config->hdr.client_id = 0xff;
6206         /* Mark this as a single MAC configuration ramrod (as opposed to a
6207          * UC/MC list configuration).
6208          */
6209         config->hdr.echo = 1;
6210
6211         /* primary MAC */
6212         config->config_table[0].msb_mac_addr =
6213                                         swab16(*(u16 *)&mac[0]);
6214         config->config_table[0].middle_mac_addr =
6215                                         swab16(*(u16 *)&mac[2]);
6216         config->config_table[0].lsb_mac_addr =
6217                                         swab16(*(u16 *)&mac[4]);
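        /* Worked example (assuming a little-endian host): for MAC
         * 00:11:22:33:44:55, *(u16 *)&mac[0] reads 0x1100 and swab16()
         * yields msb_mac_addr = 0x0011; middle and lsb become 0x2233 and
         * 0x4455, matching the %04x:%04x:%04x format printed below.
         */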
6218         config->config_table[0].clients_bit_vector =
6219                                         cpu_to_le32(cl_bit_vec);
6220         config->config_table[0].vlan_id = 0;
6221         config->config_table[0].pf_id = BP_FUNC(bp);
6222         if (set)
6223                 SET_FLAG(config->config_table[0].flags,
6224                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6225                         T_ETH_MAC_COMMAND_SET);
6226         else
6227                 SET_FLAG(config->config_table[0].flags,
6228                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6229                         T_ETH_MAC_COMMAND_INVALIDATE);
6230
6231         if (is_bcast)
6232                 SET_FLAG(config->config_table[0].flags,
6233                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6234
6235         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
6236            (set ? "setting" : "clearing"),
6237            config->config_table[0].msb_mac_addr,
6238            config->config_table[0].middle_mac_addr,
6239            config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6240
6241         mb();
6242
6243         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6244                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6245                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6246
6247         /* Wait for a completion */
6248         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6249 }
6250
6251 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6252                              int *state_p, int flags)
6253 {
6254         /* can take a while if any port is running */
6255         int cnt = 5000;
6256         u8 poll = flags & WAIT_RAMROD_POLL;
6257         u8 common = flags & WAIT_RAMROD_COMMON;
6258
6259         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6260            poll ? "polling" : "waiting", state, idx);
6261
6262         might_sleep();
6263         while (cnt--) {
6264                 if (poll) {
6265                         if (common)
6266                                 bnx2x_eq_int(bp);
6267                         else {
6268                                 bnx2x_rx_int(bp->fp, 10);
6269                                 /* if index is different from 0
6270                                  * the reply for some commands will
6271                                  * be on the non-default queue
6272                                  */
6273                                 if (idx)
6274                                         bnx2x_rx_int(&bp->fp[idx], 10);
6275                         }
6276                 }
6277
6278                 mb(); /* state is changed by bnx2x_sp_event() */
6279                 if (*state_p == state) {
6280 #ifdef BNX2X_STOP_ON_ERROR
6281                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6282 #endif
6283                         return 0;
6284                 }
6285
6286                 msleep(1);
6287
6288                 if (bp->panic)
6289                         return -EIO;
6290         }
6291
6292         /* timeout! */
6293         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6294                   poll ? "polling" : "waiting", state, idx);
6295 #ifdef BNX2X_STOP_ON_ERROR
6296         bnx2x_panic();
6297 #endif
6298
6299         return -EBUSY;
6300 }
6301
6302 static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6303 {
6304         if (CHIP_IS_E1H(bp))
6305                 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6306         else if (CHIP_MODE_IS_4_PORT(bp))
6307                 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
6308         else
6309                 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
6310 }
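/* Example (assuming E1H_FUNC_MAX == 8): on E1H, function 2 asking for
 * rel_offset 1 gets CAM line 8*1 + 2 = 10, i.e. the CAM is grouped by
 * rel_offset first and indexed by function (or vn on E2) within a group.
 */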
6311
6312 /**
6313  *  LLH CAM line allocations: currently only iSCSI and ETH MACs are
6314  *  relevant. In addition, current implementation is tuned for a
6315  *  single ETH MAC.
6316  */
6317 enum {
6318         LLH_CAM_ISCSI_ETH_LINE = 0,
6319         LLH_CAM_ETH_LINE,
6320         LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6321 };
6322
6323 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6324                           int set,
6325                           unsigned char *dev_addr,
6326                           int index)
6327 {
6328         u32 wb_data[2];
6329         u32 mem_offset, ena_offset, mem_index;
6330         /**
6331          * indexes mapping:
6332          * 0..7 - goes to MEM
6333          * 8..15 - goes to MEM2
6334          */
6335
6336         if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6337                 return;
6338
6339         /* calculate memory start offset according to the mapping
6340          * and index in the memory */
6341         if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6342                 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6343                                            NIG_REG_LLH0_FUNC_MEM;
6344                 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6345                                            NIG_REG_LLH0_FUNC_MEM_ENABLE;
6346                 mem_index = index;
6347         } else {
6348                 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6349                                            NIG_REG_P0_LLH_FUNC_MEM2;
6350                 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6351                                            NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6352                 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6353         }
6354
6355         if (set) {
6356                 /* LLH_FUNC_MEM is a u64 WB register */
6357                 mem_offset += 8*mem_index;
6358
6359                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6360                               (dev_addr[4] <<  8) |  dev_addr[5]);
6361                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
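                /* e.g. MAC 00:11:22:33:44:55 is packed here as
                 * wb_data[0] = 0x22334455 and wb_data[1] = 0x00000011
                 * (the two most significant bytes go in the second word).
                 */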
6362
6363                 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6364         }
6365
6366         /* enable/disable the entry */
6367         REG_WR(bp, ena_offset + 4*mem_index, set);
6368
6369 }
6370
6371 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6372 {
6373         u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6374                          bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6375
6376         /* networking MAC */
6377         bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6378                                (1 << bp->fp->cl_id), cam_offset, 0);
6379
6380         bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6381
6382         if (CHIP_IS_E1(bp)) {
6383                 /* broadcast MAC */
6384                 static const u8 bcast[ETH_ALEN] = {
6385                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6386                 };
6387                 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6388         }
6389 }
6390
6391 static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6392 {
6393         return CHIP_REV_IS_SLOW(bp) ?
6394                 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6395                 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6396 }
6397
6398 /* Set the MC list without waiting: waiting implies sleeping, and
6399  * set_rx_mode can be invoked from a non-sleepable context.
6400  *
6401  * Instead we use the same ramrod data buffer each time we need
6402  * to configure a list of addresses, and use the fact that the
6403  * list of MACs is changed in an incremental way and that the
6404  * function is called under the netif_addr_lock. A temporarily
6405  * inconsistent CAM configuration (possible in case of a very fast
6406  * sequence of add/del/add on the host side) will shortly be
6407  * restored by the handler of the last ramrod.
6408  */
6409 static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6410 {
6411         int i = 0, old;
6412         struct net_device *dev = bp->dev;
6413         u8 offset = bnx2x_e1_cam_mc_offset(bp);
6414         struct netdev_hw_addr *ha;
6415         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6416         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6417
6418         if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6419                 return -EINVAL;
6420
6421         netdev_for_each_mc_addr(ha, dev) {
6422                 /* copy mac */
6423                 config_cmd->config_table[i].msb_mac_addr =
6424                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6425                 config_cmd->config_table[i].middle_mac_addr =
6426                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6427                 config_cmd->config_table[i].lsb_mac_addr =
6428                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6429
6430                 config_cmd->config_table[i].vlan_id = 0;
6431                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6432                 config_cmd->config_table[i].clients_bit_vector =
6433                         cpu_to_le32(1 << BP_L_ID(bp));
6434
6435                 SET_FLAG(config_cmd->config_table[i].flags,
6436                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6437                         T_ETH_MAC_COMMAND_SET);
6438
6439                 DP(NETIF_MSG_IFUP,
6440                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6441                    config_cmd->config_table[i].msb_mac_addr,
6442                    config_cmd->config_table[i].middle_mac_addr,
6443                    config_cmd->config_table[i].lsb_mac_addr);
6444                 i++;
6445         }
6446         old = config_cmd->hdr.length;
6447         if (old > i) {
6448                 for (; i < old; i++) {
6449                         if (CAM_IS_INVALID(config_cmd->
6450                                            config_table[i])) {
6451                                 /* already invalidated */
6452                                 break;
6453                         }
6454                         /* invalidate */
6455                         SET_FLAG(config_cmd->config_table[i].flags,
6456                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6457                                 T_ETH_MAC_COMMAND_INVALIDATE);
6458                 }
6459         }
6460
6461         wmb();
6462
6463         config_cmd->hdr.length = i;
6464         config_cmd->hdr.offset = offset;
6465         config_cmd->hdr.client_id = 0xff;
6466         /* Mark that this ramrod doesn't use bp->set_mac_pending for
6467          * synchronization.
6468          */
6469         config_cmd->hdr.echo = 0;
6470
6471         mb();
6472
6473         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6474                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6475 }
6476
6477 void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6478 {
6479         int i;
6480         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6481         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6482         int ramrod_flags = WAIT_RAMROD_COMMON;
6483         u8 offset = bnx2x_e1_cam_mc_offset(bp);
6484
6485         for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6486                 SET_FLAG(config_cmd->config_table[i].flags,
6487                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6488                         T_ETH_MAC_COMMAND_INVALIDATE);
6489
6490         wmb();
6491
6492         config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6493         config_cmd->hdr.offset = offset;
6494         config_cmd->hdr.client_id = 0xff;
6495         /* We'll wait for a completion this time... */
6496         config_cmd->hdr.echo = 1;
6497
6498         bp->set_mac_pending = 1;
6499
6500         mb();
6501
6502         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6503                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6504
6505         /* Wait for a completion */
6506         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6507                                 ramrod_flags);
6508
6509 }
6510
6511 /* Accept one or more multicasts */
6512 static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6513 {
6514         struct net_device *dev = bp->dev;
6515         struct netdev_hw_addr *ha;
6516         u32 mc_filter[MC_HASH_SIZE];
6517         u32 crc, bit, regidx;
6518         int i;
6519
6520         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6521
6522         netdev_for_each_mc_addr(ha, dev) {
6523                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6524                    bnx2x_mc_addr(ha));
6525
6526                 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6527                                 ETH_ALEN);
6528                 bit = (crc >> 24) & 0xff;
6529                 regidx = bit >> 5;
6530                 bit &= 0x1f;
6531                 mc_filter[regidx] |= (1 << bit);
6532         }
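        /* Worked example: if crc32c() of an address has 0x9b (155) as its
         * top byte, then regidx = 155 >> 5 = 4 and bit = 155 & 0x1f = 27,
         * so bit 27 of mc_filter[4] is set; the 8-bit index thus selects
         * one of 256 bits spread across the MC_HASH_SIZE registers.
         */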
6533
6534         for (i = 0; i < MC_HASH_SIZE; i++)
6535                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6536                        mc_filter[i]);
6537
6538         return 0;
6539 }
6540
6541 void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6542 {
6543         int i;
6544
6545         for (i = 0; i < MC_HASH_SIZE; i++)
6546                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6547 }
6548
6549 #ifdef BCM_CNIC
6550 /**
6551  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6552  * MAC(s). This function will wait until the ramrod completion
6553  * returns.
6554  *
6555  * @param bp driver handle
6556  * @param set set or clear the CAM entry
6557  *
6558  * @return 0 on success, -ENODEV if the ramrod doesn't return.
6559  */
6560 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6561 {
6562         u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6563                          bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6564         u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6565                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6566         u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6567         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
6568
6569         /* Send a SET_MAC ramrod */
6570         bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
6571                                cam_offset, 0);
6572
6573         bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6574
6575         return 0;
6576 }
6577
6578 /**
6579  * Set FCoE L2 MAC(s) at the next entries in the CAM after the
6580  * ETH MAC(s). This function will wait until the ramrod
6581  * completion returns.
6582  *
6583  * @param bp driver handle
6584  * @param set set or clear the CAM entry
6585  *
6586  * @return 0 on success, -ENODEV if the ramrod doesn't return.
6587  */
6588 int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6589 {
6590         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6591         /**
6592          * CAM allocation for E1H
6593          * eth unicasts: by func number
6594          * iscsi: by func number
6595          * fip unicast: by func number
6596          * fip multicast: by func number
6597          */
6598         bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6599                 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6600
6601         return 0;
6602 }
6603
6604 int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6605 {
6606         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6607
6608         /**
6609          * CAM allocation for E1H
6610          * eth unicasts: by func number
6611          * iscsi: by func number
6612          * fip unicast: by func number
6613          * fip multicast: by func number
6614          */
6615         bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6616                 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6617
6618         return 0;
6619 }
6620 #endif
6621
6622 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6623                                     struct bnx2x_client_init_params *params,
6624                                     u8 activate,
6625                                     struct client_init_ramrod_data *data)
6626 {
6627         /* Clear the buffer */
6628         memset(data, 0, sizeof(*data));
6629
6630         /* general */
6631         data->general.client_id = params->rxq_params.cl_id;
6632         data->general.statistics_counter_id = params->rxq_params.stat_id;
6633         data->general.statistics_en_flg =
6634                 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6635         data->general.is_fcoe_flg =
6636                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
6637         data->general.activate_flg = activate;
6638         data->general.sp_client_id = params->rxq_params.spcl_id;
6639
6640         /* Rx data */
6641         data->rx.tpa_en_flg =
6642                 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6643         data->rx.vmqueue_mode_en_flg = 0;
6644         data->rx.cache_line_alignment_log_size =
6645                 params->rxq_params.cache_line_log;
6646         data->rx.enable_dynamic_hc =
6647                 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6648         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6649         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6650         data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6651
6652         /* We don't set drop flags */
6653         data->rx.drop_ip_cs_err_flg = 0;
6654         data->rx.drop_tcp_cs_err_flg = 0;
6655         data->rx.drop_ttl0_flg = 0;
6656         data->rx.drop_udp_cs_err_flg = 0;
6657
6658         data->rx.inner_vlan_removal_enable_flg =
6659                 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6660         data->rx.outer_vlan_removal_enable_flg =
6661                 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6662         data->rx.status_block_id = params->rxq_params.fw_sb_id;
6663         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6664         data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6665         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6666         data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6667         data->rx.bd_page_base.lo =
6668                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6669         data->rx.bd_page_base.hi =
6670                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6671         data->rx.sge_page_base.lo =
6672                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6673         data->rx.sge_page_base.hi =
6674                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6675         data->rx.cqe_page_base.lo =
6676                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6677         data->rx.cqe_page_base.hi =
6678                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6679         data->rx.is_leading_rss =
6680                 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6681         data->rx.is_approx_mcast = data->rx.is_leading_rss;
6682
6683         /* Tx data */
6684         data->tx.enforce_security_flg = 0; /* VF specific */
6685         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6686         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6687         data->tx.mtu = 0; /* VF specific */
6688         data->tx.tx_bd_page_base.lo =
6689                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6690         data->tx.tx_bd_page_base.hi =
6691                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6692
6693         /* flow control data */
6694         data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6695         data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6696         data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6697         data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6698         data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6699         data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6700         data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6701
6702         data->fc.safc_group_num = params->txq_params.cos;
6703         data->fc.safc_group_en_flg =
6704                 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6705         data->fc.traffic_type =
6706                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6707                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
6708 }
6709
6710 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6711 {
6712         /* ustorm cxt validation */
6713         cxt->ustorm_ag_context.cdu_usage =
6714                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6715                                        ETH_CONNECTION_TYPE);
6716         /* xcontext validation */
6717         cxt->xstorm_ag_context.cdu_reserved =
6718                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6719                                        ETH_CONNECTION_TYPE);
6720 }
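/* The intent, as the macro names suggest: the CDU keeps a validation byte
 * per aggregation context, and encoding the cid, CDU region number and
 * ETH_CONNECTION_TYPE here lets the hardware detect accesses against a
 * stale or mismatched context.
 */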
6721
6722 static int bnx2x_setup_fw_client(struct bnx2x *bp,
6723                                  struct bnx2x_client_init_params *params,
6724                                  u8 activate,
6725                                  struct client_init_ramrod_data *data,
6726                                  dma_addr_t data_mapping)
6727 {
6728         u16 hc_usec;
6729         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6730         int ramrod_flags = 0, rc;
6731
6732         /* HC and context validation values */
6733         hc_usec = params->txq_params.hc_rate ?
6734                 1000000 / params->txq_params.hc_rate : 0;
6735         bnx2x_update_coalesce_sb_index(bp,
6736                         params->txq_params.fw_sb_id,
6737                         params->txq_params.sb_cq_index,
6738                         !(params->txq_params.flags & QUEUE_FLG_HC),
6739                         hc_usec);
6740
6741         *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6742
6743         hc_usec = params->rxq_params.hc_rate ?
6744                 1000000 / params->rxq_params.hc_rate : 0;
6745         bnx2x_update_coalesce_sb_index(bp,
6746                         params->rxq_params.fw_sb_id,
6747                         params->rxq_params.sb_cq_index,
6748                         !(params->rxq_params.flags & QUEUE_FLG_HC),
6749                         hc_usec);
6750
6751         bnx2x_set_ctx_validation(params->rxq_params.cxt,
6752                                  params->rxq_params.cid);
6753
6754         /* zero stats */
6755         if (params->txq_params.flags & QUEUE_FLG_STATS)
6756                 storm_memset_xstats_zero(bp, BP_PORT(bp),
6757                                          params->txq_params.stat_id);
6758
6759         if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6760                 storm_memset_ustats_zero(bp, BP_PORT(bp),
6761                                          params->rxq_params.stat_id);
6762                 storm_memset_tstats_zero(bp, BP_PORT(bp),
6763                                          params->rxq_params.stat_id);
6764         }
6765
6766         /* Fill the ramrod data */
6767         bnx2x_fill_cl_init_data(bp, params, activate, data);
6768
6769         /* SETUP ramrod.
6770          *
6771          * bnx2x_sp_post() takes a spin_lock, so no explicit memory
6772          * barrier other than mmiowb() is needed to impose a
6773          * proper ordering of memory operations.
6774          */
6775         mmiowb();
6776
6777
6778         bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6779                       U64_HI(data_mapping), U64_LO(data_mapping), 0);
6780
6781         /* Wait for completion */
6782         rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6783                                  params->ramrod_params.index,
6784                                  params->ramrod_params.pstate,
6785                                  ramrod_flags);
6786         return rc;
6787 }
6788
6789 /**
6790  * Configure interrupt mode according to current configuration.
6791  * In case of MSI-X it will also try to enable MSI-X.
6792  *
6793  * @param bp
6794  *
6795  * @return int
6796  */
6797 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6798 {
6799         int rc = 0;
6800
6801         switch (bp->int_mode) {
6802         case INT_MODE_MSI:
6803                 bnx2x_enable_msi(bp);
6804                 /* falling through... */
6805         case INT_MODE_INTx:
6806                 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6807                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6808                 break;
6809         default:
6810                 /* Set number of queues according to bp->multi_mode value */
6811                 bnx2x_set_num_queues(bp);
6812
6813                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6814                    bp->num_queues);
6815
6816                 /* try to enable MSI-X with the requested number of fp's;
6817                  * if we can't use MSI-X we only need one fp, so fall
6818                  * back to MSI or legacy INTx with a single fp
6819                  */
6820                 rc = bnx2x_enable_msix(bp);
6821                 if (rc) {
6822                         /* failed to enable MSI-X */
6823                         if (bp->multi_mode)
6824                                 DP(NETIF_MSG_IFUP,
6825                                           "Multi requested but failed to "
6826                                           "enable MSI-X (%d), "
6827                                           "set number of queues to %d\n",
6828                                    bp->num_queues,
6829                                    1 + NONE_ETH_CONTEXT_USE);
6830                         bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6831
6832                         if (!(bp->flags & DISABLE_MSI_FLAG))
6833                                 bnx2x_enable_msi(bp);
6834                 }
6835
6836                 break;
6837         }
6838
6839         return rc;
6840 }
6841
6842 /* must be called prior to any HW initializations */
6843 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6844 {
6845         return L2_ILT_LINES(bp);
6846 }
6847
6848 void bnx2x_ilt_set_info(struct bnx2x *bp)
6849 {
6850         struct ilt_client_info *ilt_client;
6851         struct bnx2x_ilt *ilt = BP_ILT(bp);
6852         u16 line = 0;
6853
6854         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6855         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6856
6857         /* CDU */
6858         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6859         ilt_client->client_num = ILT_CLIENT_CDU;
6860         ilt_client->page_size = CDU_ILT_PAGE_SZ;
6861         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6862         ilt_client->start = line;
6863         line += L2_ILT_LINES(bp);
6864 #ifdef BCM_CNIC
6865         line += CNIC_ILT_LINES;
6866 #endif
6867         ilt_client->end = line - 1;
6868
6869         DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6870                                          "flags 0x%x, hw psz %d\n",
6871            ilt_client->start,
6872            ilt_client->end,
6873            ilt_client->page_size,
6874            ilt_client->flags,
6875            ilog2(ilt_client->page_size >> 12));
6876
6877         /* QM */
6878         if (QM_INIT(bp->qm_cid_count)) {
6879                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6880                 ilt_client->client_num = ILT_CLIENT_QM;
6881                 ilt_client->page_size = QM_ILT_PAGE_SZ;
6882                 ilt_client->flags = 0;
6883                 ilt_client->start = line;
6884
6885                 /* 4 bytes for each cid */
6886                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6887                                                          QM_ILT_PAGE_SZ);
6888
6889                 ilt_client->end = line - 1;
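                /*
                 * Worked example with illustrative numbers (the real
                 * QM_QUEUES_PER_FUNC and QM_ILT_PAGE_SZ values come from
                 * the headers): with qm_cid_count = 64,
                 * QM_QUEUES_PER_FUNC = 16 and QM_ILT_PAGE_SZ = 4096, the
                 * QM client would need
                 * DIV_ROUND_UP(64 * 16 * 4, 4096) = 1 ILT line.
                 */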
6890
6891                 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6892                                                  "flags 0x%x, hw psz %d\n",
6893                    ilt_client->start,
6894                    ilt_client->end,
6895                    ilt_client->page_size,
6896                    ilt_client->flags,
6897                    ilog2(ilt_client->page_size >> 12));
6898
6899         }
6900         /* SRC */
6901         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6902 #ifdef BCM_CNIC
6903         ilt_client->client_num = ILT_CLIENT_SRC;
6904         ilt_client->page_size = SRC_ILT_PAGE_SZ;
6905         ilt_client->flags = 0;
6906         ilt_client->start = line;
6907         line += SRC_ILT_LINES;
6908         ilt_client->end = line - 1;
6909
6910         DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6911                                          "flags 0x%x, hw psz %d\n",
6912            ilt_client->start,
6913            ilt_client->end,
6914            ilt_client->page_size,
6915            ilt_client->flags,
6916            ilog2(ilt_client->page_size >> 12));
6917
6918 #else
6919         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6920 #endif
6921
6922         /* TM */
6923         ilt_client = &ilt->clients[ILT_CLIENT_TM];
6924 #ifdef BCM_CNIC
6925         ilt_client->client_num = ILT_CLIENT_TM;
6926         ilt_client->page_size = TM_ILT_PAGE_SZ;
6927         ilt_client->flags = 0;
6928         ilt_client->start = line;
6929         line += TM_ILT_LINES;
6930         ilt_client->end = line - 1;
6931
6932         DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6933                                          "flags 0x%x, hw psz %d\n",
6934            ilt_client->start,
6935            ilt_client->end,
6936            ilt_client->page_size,
6937            ilt_client->flags,
6938            ilog2(ilt_client->page_size >> 12));
6939
6940 #else
6941         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6942 #endif
6943 }
6944
6945 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6946                        int is_leading)
6947 {
6948         struct bnx2x_client_init_params params = { {0} };
6949         int rc;
6950
6951         /* reset IGU state; skip for the FCoE L2 queue */
6952         if (!IS_FCOE_FP(fp))
6953                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6954                              IGU_INT_ENABLE, 0);
6955
6956         params.ramrod_params.pstate = &fp->state;
6957         params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6958         params.ramrod_params.index = fp->index;
6959         params.ramrod_params.cid = fp->cid;
6960
6961 #ifdef BCM_CNIC
6962         if (IS_FCOE_FP(fp))
6963                 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6964
6965 #endif
6966
6967         if (is_leading)
6968                 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6969
6970         bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6971
6972         bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6973
6974         rc = bnx2x_setup_fw_client(bp, &params, 1,
6975                                      bnx2x_sp(bp, client_init_data),
6976                                      bnx2x_sp_mapping(bp, client_init_data));
6977         return rc;
6978 }
6979
6980 static int bnx2x_stop_fw_client(struct bnx2x *bp,
6981                                 struct bnx2x_client_ramrod_params *p)
6982 {
6983         int rc;
6984
6985         int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6986
6987         /* halt the connection */
6988         *p->pstate = BNX2X_FP_STATE_HALTING;
6989         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6990                                                   p->cl_id, 0);
6991
6992         /* Wait for completion */
6993         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6994                                p->pstate, poll_flag);
6995         if (rc) /* timeout */
6996                 return rc;
6997
6998         *p->pstate = BNX2X_FP_STATE_TERMINATING;
6999         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
7000                                                        p->cl_id, 0);
7001         /* Wait for completion */
7002         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
7003                                p->pstate, poll_flag);
7004         if (rc) /* timeout */
7005                 return rc;
7006
7007
7008         /* delete cfc entry */
7009         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
7010
7011         /* Wait for completion */
7012         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
7013                                p->pstate, WAIT_RAMROD_COMMON);
7014         return rc;
7015 }
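/*
 * Sequencing note (derived from the function above): a client is torn
 * down with three ramrods in strict order - ETH_HALT (wait for
 * FP_STATE_HALTED), ETH_TERMINATE (wait for FP_STATE_TERMINATED) and
 * COMMON_CFC_DEL (wait for FP_STATE_CLOSED) - with bnx2x_wait_ramrod()
 * confirming each state transition before the next ramrod is posted.
 */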
7016
7017 static int bnx2x_stop_client(struct bnx2x *bp, int index)
7018 {
7019         struct bnx2x_client_ramrod_params client_stop = {0};
7020         struct bnx2x_fastpath *fp = &bp->fp[index];
7021
7022         client_stop.index = index;
7023         client_stop.cid = fp->cid;
7024         client_stop.cl_id = fp->cl_id;
7025         client_stop.pstate = &(fp->state);
7026         client_stop.poll = 0;
7027
7028         return bnx2x_stop_fw_client(bp, &client_stop);
7029 }
7030
7031
7032 static void bnx2x_reset_func(struct bnx2x *bp)
7033 {
7034         int port = BP_PORT(bp);
7035         int func = BP_FUNC(bp);
7036         int i;
7037         int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
7038                         (CHIP_IS_E2(bp) ?
7039                          offsetof(struct hc_status_block_data_e2, common) :
7040                          offsetof(struct hc_status_block_data_e1x, common));
7041         int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
7042         int pfid_offset = offsetof(struct pci_entity, pf_id);
7043
7044         /* Disable the function in the FW */
7045         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
7046         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
7047         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
7048         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
7049
7050         /* FP SBs */
7051         for_each_eth_queue(bp, i) {
7052                 struct bnx2x_fastpath *fp = &bp->fp[i];
7053                 REG_WR8(bp,
7054                         BAR_CSTRORM_INTMEM +
7055                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
7056                         + pfunc_offset_fp + pfid_offset,
7057                         HC_FUNCTION_DISABLED);
7058         }
7059
7060         /* SP SB */
7061         REG_WR8(bp,
7062                 BAR_CSTRORM_INTMEM +
7063                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
7064                 pfunc_offset_sp + pfid_offset,
7065                 HC_FUNCTION_DISABLED);
7066
7067
7068         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
7069                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
7070                        0);
7071
7072         /* Configure IGU */
7073         if (bp->common.int_block == INT_BLOCK_HC) {
7074                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7075                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7076         } else {
7077                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7078                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7079         }
7080
7081 #ifdef BCM_CNIC
7082         /* Disable Timer scan */
7083         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7084         /*
7085          * Wait for at least 10ms and up to 2 second for the timers scan to
7086          * Wait for at least 10ms and up to 2 seconds for the timers scan to
7087          */
7088         for (i = 0; i < 200; i++) {
7089                 msleep(10);
7090                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7091                         break;
7092         }
7093 #endif
7094         /* Clear ILT */
7095         bnx2x_clear_func_ilt(bp, func);
7096
7097         /* Timers workaround for an E2 bug: if this is vnic-3,
7098          * we need to set the entire ILT range for the timers.
7099          */
7100         if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7101                 struct ilt_client_info ilt_cli;
7102                 /* use dummy TM client */
7103                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7104                 ilt_cli.start = 0;
7105                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7106                 ilt_cli.client_num = ILT_CLIENT_TM;
7107
7108                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7109         }
7110
7111         /* this assumes that reset_port() is called before reset_func() */
7112         if (CHIP_IS_E2(bp))
7113                 bnx2x_pf_disable(bp);
7114
7115         bp->dmae_ready = 0;
7116 }
7117
7118 static void bnx2x_reset_port(struct bnx2x *bp)
7119 {
7120         int port = BP_PORT(bp);
7121         u32 val;
7122
7123         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7124
7125         /* Do not rcv packets to BRB */
7126         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7127         /* Do not direct rcv packets that are not for MCP to the BRB */
7128         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7129                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7130
7131         /* Configure AEU */
7132         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7133
7134         msleep(100);
7135         /* Check for BRB port occupancy */
7136         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7137         if (val)
7138                 DP(NETIF_MSG_IFDOWN,
7139                    "BRB1 is not empty  %d blocks are occupied\n", val);
7140
7141         /* TODO: Close Doorbell port? */
7142 }
7143
7144 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7145 {
7146         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7147            BP_ABS_FUNC(bp), reset_code);
7148
7149         switch (reset_code) {
7150         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7151                 bnx2x_reset_port(bp);
7152                 bnx2x_reset_func(bp);
7153                 bnx2x_reset_common(bp);
7154                 break;
7155
7156         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7157                 bnx2x_reset_port(bp);
7158                 bnx2x_reset_func(bp);
7159                 break;
7160
7161         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7162                 bnx2x_reset_func(bp);
7163                 break;
7164
7165         default:
7166                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7167                 break;
7168         }
7169 }
7170
7171 #ifdef BCM_CNIC
7172 static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7173 {
7174         if (bp->flags & FCOE_MACS_SET) {
7175                 if (!IS_MF_SD(bp))
7176                         bnx2x_set_fip_eth_mac_addr(bp, 0);
7177
7178                 bnx2x_set_all_enode_macs(bp, 0);
7179
7180                 bp->flags &= ~FCOE_MACS_SET;
7181         }
7182 }
7183 #endif
7184
7185 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7186 {
7187         int port = BP_PORT(bp);
7188         u32 reset_code = 0;
7189         int i, cnt, rc;
7190
7191         /* Wait until tx fastpath tasks complete */
7192         for_each_tx_queue(bp, i) {
7193                 struct bnx2x_fastpath *fp = &bp->fp[i];
7194
7195                 cnt = 1000;
7196                 while (bnx2x_has_tx_work_unload(fp)) {
7197
7198                         if (!cnt) {
7199                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7200                                           i);
7201 #ifdef BNX2X_STOP_ON_ERROR
7202                                 bnx2x_panic();
7203                                 return -EBUSY;
7204 #else
7205                                 break;
7206 #endif
7207                         }
7208                         cnt--;
7209                         msleep(1);
7210                 }
7211         }
7212         /* Give HW time to discard old tx messages */
7213         msleep(1);
7214
7215         bnx2x_set_eth_mac(bp, 0);
7216
7217         bnx2x_invalidate_uc_list(bp);
7218
7219         if (CHIP_IS_E1(bp))
7220                 bnx2x_invalidate_e1_mc_list(bp);
7221         else {
7222                 bnx2x_invalidate_e1h_mc_list(bp);
7223                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7224         }
7225
7226 #ifdef BCM_CNIC
7227         bnx2x_del_fcoe_eth_macs(bp);
7228 #endif
7229
7230         if (unload_mode == UNLOAD_NORMAL)
7231                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7232
7233         else if (bp->flags & NO_WOL_FLAG)
7234                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7235
7236         else if (bp->wol) {
7237                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7238                 u8 *mac_addr = bp->dev->dev_addr;
7239                 u32 val;
7240                 /* The MAC address is written to entries 1-4 to
7241                    preserve entry 0, which is used by the PMF */
7242                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7243
7244                 val = (mac_addr[0] << 8) | mac_addr[1];
7245                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7246
7247                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7248                       (mac_addr[4] << 8) | mac_addr[5];
7249                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
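                /*
                 * Packing example (illustrative MAC, not from the source):
                 * for 00:11:22:33:44:55 the first write is
                 * (0x00 << 8) | 0x11 = 0x0011 and the second is
                 * (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 =
                 * 0x22334455, i.e. the address is split big-endian across
                 * the two EMAC match registers.
                 */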
7250
7251                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7252
7253         } else
7254                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7255
7256         /* Close multi and leading connections;
7257            completions for the ramrods are collected synchronously */
7258         for_each_queue(bp, i)
7259
7260                 if (bnx2x_stop_client(bp, i))
7261 #ifdef BNX2X_STOP_ON_ERROR
7262                         return;
7263 #else
7264                         goto unload_error;
7265 #endif
7266
7267         rc = bnx2x_func_stop(bp);
7268         if (rc) {
7269                 BNX2X_ERR("Function stop failed!\n");
7270 #ifdef BNX2X_STOP_ON_ERROR
7271                 return;
7272 #else
7273                 goto unload_error;
7274 #endif
7275         }
7276 #ifndef BNX2X_STOP_ON_ERROR
7277 unload_error:
7278 #endif
7279         if (!BP_NOMCP(bp))
7280                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7281         else {
7282                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
7283                                      "%d, %d, %d\n", BP_PATH(bp),
7284                    load_count[BP_PATH(bp)][0],
7285                    load_count[BP_PATH(bp)][1],
7286                    load_count[BP_PATH(bp)][2]);
7287                 load_count[BP_PATH(bp)][0]--;
7288                 load_count[BP_PATH(bp)][1 + port]--;
7289                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
7290                                      "%d, %d, %d\n", BP_PATH(bp),
7291                    load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7292                    load_count[BP_PATH(bp)][2]);
7293                 if (load_count[BP_PATH(bp)][0] == 0)
7294                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7295                 else if (load_count[BP_PATH(bp)][1 + port] == 0)
7296                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7297                 else
7298                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7299         }
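        /*
         * Bookkeeping note (derived from the code above): without an MCP,
         * load_count[path][0] counts all loaded functions on this path and
         * load_count[path][1 + port] counts those per port, so the last
         * function to unload performs the COMMON reset, the last one on a
         * port performs the PORT reset, and any other function resets only
         * itself.
         */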
7300
7301         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7302             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7303                 bnx2x__link_reset(bp);
7304
7305         /* Disable HW interrupts, NAPI */
7306         bnx2x_netif_stop(bp, 1);
7307
7308         /* Release IRQs */
7309         bnx2x_free_irq(bp);
7310
7311         /* Reset the chip */
7312         bnx2x_reset_chip(bp, reset_code);
7313
7314         /* Report UNLOAD_DONE to MCP */
7315         if (!BP_NOMCP(bp))
7316                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7317
7318 }
7319
7320 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7321 {
7322         u32 val;
7323
7324         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7325
7326         if (CHIP_IS_E1(bp)) {
7327                 int port = BP_PORT(bp);
7328                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7329                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
7330
7331                 val = REG_RD(bp, addr);
7332                 val &= ~(0x300);
7333                 REG_WR(bp, addr, val);
7334         } else if (CHIP_IS_E1H(bp)) {
7335                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7336                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7337                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7338                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7339         }
7340 }
7341
7342 /* Close gates #2, #3 and #4: */
7343 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7344 {
7345         u32 val, addr;
7346
7347         /* Gates #2 and #4a are closed/opened for "not E1" only */
7348         if (!CHIP_IS_E1(bp)) {
7349                 /* #4 */
7350                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7351                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7352                        close ? (val | 0x1) : (val & (~(u32)1)));
7353                 /* #2 */
7354                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7355                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7356                        close ? (val | 0x1) : (val & (~(u32)1)));
7357         }
7358
7359         /* #3 */
7360         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7361         val = REG_RD(bp, addr);
7362         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7363
7364         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7365                 close ? "closing" : "opening");
7366         mmiowb();
7367 }
7368
7369 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
7370
7371 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7372 {
7373         /* Do some magic... */
7374         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7375         *magic_val = val & SHARED_MF_CLP_MAGIC;
7376         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7377 }
7378
7379 /* Restore the value of the `magic' bit.
7380  *
7381  * @param bp Driver handle.
7382  * @param magic_val Old value of the `magic' bit.
7383  */
7384 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7385 {
7386         /* Restore the `magic' bit value... */
7387         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7388         MF_CFG_WR(bp, shared_mf_config.clp_mb,
7389                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7390 }
7391
7392 /**
7393  * Prepares for MCP reset: takes care of CLP configurations.
7394  *
7395  * @param bp
7396  * @param magic_val Old value of 'magic' bit.
7397  */
7398 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7399 {
7400         u32 shmem;
7401         u32 validity_offset;
7402
7403         DP(NETIF_MSG_HW, "Starting\n");
7404
7405         /* Set `magic' bit in order to save MF config */
7406         if (!CHIP_IS_E1(bp))
7407                 bnx2x_clp_reset_prep(bp, magic_val);
7408
7409         /* Get shmem offset */
7410         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7411         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7412
7413         /* Clear validity map flags */
7414         if (shmem > 0)
7415                 REG_WR(bp, shmem + validity_offset, 0);
7416 }
7417
7418 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
7419 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
7420
7421 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7422  * depending on the HW type.
7423  *
7424  * @param bp
7425  */
7426 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7427 {
7428         /* special handling for emulation and FPGA,
7429            wait 10 times longer */
7430         if (CHIP_REV_IS_SLOW(bp))
7431                 msleep(MCP_ONE_TIMEOUT*10);
7432         else
7433                 msleep(MCP_ONE_TIMEOUT);
7434 }
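/*
 * Timing note (derived from the constants above): bnx2x_reset_mcp_comp()
 * below polls up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 times, i.e. about
 * 50 * 100ms = 5s on real silicon and 50 * 1s = 50s on emulation/FPGA,
 * where each individual wait is ten times longer.
 */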
7435
7436 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7437 {
7438         u32 shmem, cnt, validity_offset, val;
7439         int rc = 0;
7440
7441         msleep(100);
7442
7443         /* Get shmem offset */
7444         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7445         if (shmem == 0) {
7446                 BNX2X_ERR("Shmem 0 return failure\n");
7447                 rc = -ENOTTY;
7448                 goto exit_lbl;
7449         }
7450
7451         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7452
7453         /* Wait for MCP to come up */
7454         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7455                 /* TBD: it's best to check the validity map of the last
7456                  * port; currently this checks port 0.
7457                  */
7458                 val = REG_RD(bp, shmem + validity_offset);
7459                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7460                    shmem + validity_offset, val);
7461
7462                 /* check that shared memory is valid. */
7463                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7464                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7465                         break;
7466
7467                 bnx2x_mcp_wait_one(bp);
7468         }
7469
7470         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7471
7472         /* Check that shared memory is valid. This indicates that MCP is up. */
7473         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7474             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7475                 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7476                 rc = -ENOTTY;
7477                 goto exit_lbl;
7478         }
7479
7480 exit_lbl:
7481         /* Restore the `magic' bit value */
7482         if (!CHIP_IS_E1(bp))
7483                 bnx2x_clp_reset_done(bp, magic_val);
7484
7485         return rc;
7486 }
7487
7488 static void bnx2x_pxp_prep(struct bnx2x *bp)
7489 {
7490         if (!CHIP_IS_E1(bp)) {
7491                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7492                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7493                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7494                 mmiowb();
7495         }
7496 }
7497
7498 /*
7499  * Reset the whole chip except for:
7500  *      - PCIE core
7501  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7502  *              one reset bit)
7503  *      - IGU
7504  *      - MISC (including AEU)
7505  *      - GRC
7506  *      - RBCN, RBCP
7507  */
7508 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7509 {
7510         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7511
7512         not_reset_mask1 =
7513                 MISC_REGISTERS_RESET_REG_1_RST_HC |
7514                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7515                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7516
7517         not_reset_mask2 =
7518                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7519                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7520                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7521                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7522                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7523                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
7524                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7525                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7526
7527         reset_mask1 = 0xffffffff;
7528
7529         if (CHIP_IS_E1(bp))
7530                 reset_mask2 = 0xffff;
7531         else
7532                 reset_mask2 = 0x1ffff;
7533
7534         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7535                reset_mask1 & (~not_reset_mask1));
7536         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7537                reset_mask2 & (~not_reset_mask2));
7538
7539         barrier();
7540         mmiowb();
7541
7542         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7543         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7544         mmiowb();
7545 }
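/*
 * Reset-register note (consistent with how these registers are used
 * elsewhere in this file, e.g. bnx2x_undi_unload()): writing a 1 to a
 * RESET_REG_*_CLEAR bit puts the corresponding block into reset, and
 * writing a 1 to the matching *_SET bit takes it back out.  Hence the
 * function above first asserts reset for everything outside the
 * not_reset masks and then releases all blocks via the SET side.
 */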
7546
7547 static int bnx2x_process_kill(struct bnx2x *bp)
7548 {
7549         int cnt = 1000;
7550         u32 val = 0;
7551         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7552
7553
7554         /* Empty the Tetris buffer, wait for 1s */
7555         do {
7556                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7557                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7558                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7559                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7560                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7561                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7562                     ((port_is_idle_0 & 0x1) == 0x1) &&
7563                     ((port_is_idle_1 & 0x1) == 0x1) &&
7564                     (pgl_exp_rom2 == 0xffffffff))
7565                         break;
7566                 msleep(1);
7567         } while (cnt-- > 0);
7568
7569         if (cnt <= 0) {
7570                 DP(NETIF_MSG_HW, "Tetris buffer didn't drain or there"
7571                           " are still"
7572                           " outstanding read requests after 1s!\n");
7573                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7574                           " port_is_idle_0=0x%08x,"
7575                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7576                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7577                           pgl_exp_rom2);
7578                 return -EAGAIN;
7579         }
7580
7581         barrier();
7582
7583         /* Close gates #2, #3 and #4 */
7584         bnx2x_set_234_gates(bp, true);
7585
7586         /* TBD: Indicate that "process kill" is in progress to MCP */
7587
7588         /* Clear "unprepared" bit */
7589         REG_WR(bp, MISC_REG_UNPREPARED, 0);
7590         barrier();
7591
7592         /* Make sure all is written to the chip before the reset */
7593         mmiowb();
7594
7595         /* Wait for 1ms to empty GLUE and PCI-E core queues,
7596          * PSWHST, GRC and PSWRD Tetris buffer.
7597          */
7598         msleep(1);
7599
7600         /* Prepare for chip reset: */
7601         /* MCP */
7602         bnx2x_reset_mcp_prep(bp, &val);
7603
7604         /* PXP */
7605         bnx2x_pxp_prep(bp);
7606         barrier();
7607
7608         /* reset the chip */
7609         bnx2x_process_kill_chip_reset(bp);
7610         barrier();
7611
7612         /* Recover after reset: */
7613         /* MCP */
7614         if (bnx2x_reset_mcp_comp(bp, val))
7615                 return -EAGAIN;
7616
7617         /* PXP */
7618         bnx2x_pxp_prep(bp);
7619
7620         /* Open the gates #2, #3 and #4 */
7621         bnx2x_set_234_gates(bp, false);
7622
7623         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7624          * reset state, re-enable attentions. */
7625
7626         return 0;
7627 }
7628
7629 static int bnx2x_leader_reset(struct bnx2x *bp)
7630 {
7631         int rc = 0;
7632         /* Try to recover after the failure */
7633         if (bnx2x_process_kill(bp)) {
7634                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7635                        bp->dev->name);
7636                 rc = -EAGAIN;
7637                 goto exit_leader_reset;
7638         }
7639
7640         /* Clear "reset is in progress" bit and update the driver state */
7641         bnx2x_set_reset_done(bp);
7642         bp->recovery_state = BNX2X_RECOVERY_DONE;
7643
7644 exit_leader_reset:
7645         bp->is_leader = 0;
7646         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7647         smp_wmb();
7648         return rc;
7649 }
7650
7651 /* Assumption: runs under rtnl lock. This together with the fact
7652  * that it's called only from bnx2x_reset_task() ensures that it
7653  * will never be called when netif_running(bp->dev) is false.
7654  */
7655 static void bnx2x_parity_recover(struct bnx2x *bp)
7656 {
7657         DP(NETIF_MSG_HW, "Handling parity\n");
7658         while (1) {
7659                 switch (bp->recovery_state) {
7660                 case BNX2X_RECOVERY_INIT:
7661                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7662                         /* Try to get a LEADER_LOCK HW lock */
7663                         if (bnx2x_trylock_hw_lock(bp,
7664                                 HW_LOCK_RESOURCE_RESERVED_08))
7665                                 bp->is_leader = 1;
7666
7667                         /* Stop the driver */
7668                         /* If interface has been removed - break */
7669                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7670                                 return;
7671
7672                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
7673                         /* Ensure "is_leader" and "recovery_state"
7674                          *  update values are seen on other CPUs
7675                          */
7676                         smp_wmb();
7677                         break;
7678
7679                 case BNX2X_RECOVERY_WAIT:
7680                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7681                         if (bp->is_leader) {
7682                                 u32 load_counter = bnx2x_get_load_cnt(bp);
7683                                 if (load_counter) {
7684                                         /* Wait until all other functions get
7685                                          * down.
7686                                          */
7687                                         schedule_delayed_work(&bp->reset_task,
7688                                                                 HZ/10);
7689                                         return;
7690                                 } else {
7691                                         /* If all other functions got down -
7692                                          * try to bring the chip back to
7693                                          * normal. In any case it's an exit
7694                                          * point for a leader.
7695                                          */
7696                                         if (bnx2x_leader_reset(bp) ||
7697                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
7698                                                 printk(KERN_ERR"%s: Recovery "
7699                                                 "has failed. Power cycle is "
7700                                                 "needed.\n", bp->dev->name);
7701                                                 /* Disconnect this device */
7702                                                 netif_device_detach(bp->dev);
7703                                                 /* Block ifup for all function
7704                                                  * of this ASIC until
7705                                                  * "process kill" or power
7706                                                  * cycle.
7707                                                  */
7708                                                 bnx2x_set_reset_in_progress(bp);
7709                                                 /* Shut down the power */
7710                                                 bnx2x_set_power_state(bp,
7711                                                                 PCI_D3hot);
7712                                                 return;
7713                                         }
7714
7715                                         return;
7716                                 }
7717                         } else { /* non-leader */
7718                                 if (!bnx2x_reset_is_done(bp)) {
7719                                         /* Try to get a LEADER_LOCK HW lock as
7720                                         /* Try to get a LEADER_LOCK HW
7721                                          * lock, since a former leader
7722                                          * may have been unloaded by the
7723                                          * user or released leadership
7724                                          * for some other reason.
7725                                         if (bnx2x_trylock_hw_lock(bp,
7726                                             HW_LOCK_RESOURCE_RESERVED_08)) {
7727                                                 /* I'm a leader now! Restart a
7728                                                  * switch case.
7729                                                  */
7730                                                 bp->is_leader = 1;
7731                                                 break;
7732                                         }
7733
7734                                         schedule_delayed_work(&bp->reset_task,
7735                                                                 HZ/10);
7736                                         return;
7737
7738                                 } else { /* A leader has completed
7739                                           * the "process kill". It's an exit
7740                                           * point for a non-leader.
7741                                           */
7742                                         bnx2x_nic_load(bp, LOAD_NORMAL);
7743                                         bp->recovery_state =
7744                                                 BNX2X_RECOVERY_DONE;
7745                                         smp_wmb();
7746                                         return;
7747                                 }
7748                         }
7749                 default:
7750                         return;
7751                 }
7752         }
7753 }
7754
7755 /* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
7756  * scheduled on a generic workqueue in order to prevent a deadlock.
7757  */
7758 static void bnx2x_reset_task(struct work_struct *work)
7759 {
7760         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7761
7762 #ifdef BNX2X_STOP_ON_ERROR
7763         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7764                   " so reset not done to allow debug dump,\n"
7765          KERN_ERR " you will need to reboot when done\n");
7766         return;
7767 #endif
7768
7769         rtnl_lock();
7770
7771         if (!netif_running(bp->dev))
7772                 goto reset_task_exit;
7773
7774         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7775                 bnx2x_parity_recover(bp);
7776         else {
7777                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7778                 bnx2x_nic_load(bp, LOAD_NORMAL);
7779         }
7780
7781 reset_task_exit:
7782         rtnl_unlock();
7783 }
7784
7785 /* end of nic load/unload */
7786
7787 /*
7788  * Init service functions
7789  */
7790
7791 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7792 {
7793         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7794         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7795         return base + (BP_ABS_FUNC(bp)) * stride;
7796 }
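/*
 * Usage note (derived from the formula above): the per-function pretend
 * registers sit at a fixed stride, so absolute function 3, for example,
 * resolves to PXP2_REG_PGL_PRETEND_FUNC_F0 + 3 * (F1 - F0).
 */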
7797
7798 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7799 {
7800         u32 reg = bnx2x_get_pretend_reg(bp);
7801
7802         /* Flush all outstanding writes */
7803         mmiowb();
7804
7805         /* Pretend to be function 0 */
7806         REG_WR(bp, reg, 0);
7807         REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
7808
7809         /* From now we are in the "like-E1" mode */
7810         bnx2x_int_disable(bp);
7811
7812         /* Flush all outstanding writes */
7813         mmiowb();
7814
7815         /* Restore the original function */
7816         REG_WR(bp, reg, BP_ABS_FUNC(bp));
7817         REG_RD(bp, reg);
7818 }
7819
7820 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7821 {
7822         if (CHIP_IS_E1(bp))
7823                 bnx2x_int_disable(bp);
7824         else
7825                 bnx2x_undi_int_disable_e1h(bp);
7826 }
7827
7828 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7829 {
7830         u32 val;
7831
7832         /* Check if there is any driver already loaded */
7833         val = REG_RD(bp, MISC_REG_UNPREPARED);
7834         if (val == 0x1) {
7835                 /* Check if it is the UNDI driver:
7836                  * UNDI initializes the CID offset for the normal doorbell to 0x7
7837                  */
7838                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7839                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7840                 if (val == 0x7) {
7841                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7842                         /* save our pf_num */
7843                         int orig_pf_num = bp->pf_num;
7844                         u32 swap_en;
7845                         u32 swap_val;
7846
7847                         /* clear the UNDI indication */
7848                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7849
7850                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7851
7852                         /* try unload UNDI on port 0 */
7853                         bp->pf_num = 0;
7854                         bp->fw_seq =
7855                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7856                                 DRV_MSG_SEQ_NUMBER_MASK);
7857                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
7858
7859                         /* if UNDI is loaded on the other port */
7860                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7861
7862                                 /* send "DONE" for previous unload */
7863                                 bnx2x_fw_command(bp,
7864                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
7865
7866                                 /* unload UNDI on port 1 */
7867                                 bp->pf_num = 1;
7868                                 bp->fw_seq =
7869                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7870                                         DRV_MSG_SEQ_NUMBER_MASK);
7871                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7872
7873                                 bnx2x_fw_command(bp, reset_code, 0);
7874                         }
7875
7876                         /* now it's safe to release the lock */
7877                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7878
7879                         bnx2x_undi_int_disable(bp);
7880
7881                         /* close input traffic and wait for it to stop */
7882                         /* Do not rcv packets to BRB */
7883                         REG_WR(bp,
7884                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7885                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7886                         /* Do not direct rcv packets that are not for MCP to
7887                          * the BRB */
7888                         REG_WR(bp,
7889                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7890                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7891                         /* clear AEU */
7892                         REG_WR(bp,
7893                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7894                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7895                         msleep(10);
7896
7897                         /* save NIG port swap info */
7898                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7899                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7900                         /* reset device */
7901                         REG_WR(bp,
7902                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7903                                0xd3ffffff);
7904                         REG_WR(bp,
7905                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7906                                0x1403);
7907                         /* take the NIG out of reset and restore swap values */
7908                         REG_WR(bp,
7909                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7910                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7911                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7912                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7913
7914                         /* send unload done to the MCP */
7915                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7916
7917                         /* restore our func and fw_seq */
7918                         bp->pf_num = orig_pf_num;
7919                         bp->fw_seq =
7920                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7921                                 DRV_MSG_SEQ_NUMBER_MASK);
7922                 } else
7923                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7924         }
7925 }
7926
7927 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7928 {
7929         u32 val, val2, val3, val4, id;
7930         u16 pmc;
7931
7932         /* Get the chip revision id and number. */
7933         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7934         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7935         id = ((val & 0xffff) << 16);
7936         val = REG_RD(bp, MISC_REG_CHIP_REV);
7937         id |= ((val & 0xf) << 12);
7938         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7939         id |= ((val & 0xff) << 4);
7940         val = REG_RD(bp, MISC_REG_BOND_ID);
7941         id |= (val & 0xf);
7942         bp->common.chip_id = id;
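        /*
         * Layout example (illustrative values, not read from real HW):
         * with chip num 0x164e, rev 0x1, metal 0x00 and bond_id 0x4 the
         * assembled id is
         * (0x164e << 16) | (0x1 << 12) | (0x00 << 4) | 0x4 = 0x164e1004.
         */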
7943
7944         /* Set doorbell size */
7945         bp->db_size = (1 << BNX2X_DB_SHIFT);
7946
7947         if (CHIP_IS_E2(bp)) {
7948                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7949                 if ((val & 1) == 0)
7950                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7951                 else
7952                         val = (val >> 1) & 1;
7953                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7954                                                        "2_PORT_MODE");
7955                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7956                                                  CHIP_2_PORT_MODE;
7957
7958                 if (CHIP_MODE_IS_4_PORT(bp))
7959                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
7960                 else
7961                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
7962         } else {
7963                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7964                 bp->pfid = bp->pf_num;                  /* 0..7 */
7965         }
7966
7967         /*
7968          * Set the base FW non-default (fast path) status block id. This
7969          * value is used to initialize the fw_sb_id saved in the fp/queue
7970          * structure, which determines the id used by the FW.
7971          */
7972         if (CHIP_IS_E1x(bp))
7973                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7974         else /* E2 */
7975                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7976
7977         bp->link_params.chip_id = bp->common.chip_id;
7978         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7979
7980         val = (REG_RD(bp, 0x2874) & 0x55);
7981         if ((bp->common.chip_id & 0x1) ||
7982             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7983                 bp->flags |= ONE_PORT_FLAG;
7984                 BNX2X_DEV_INFO("single port device\n");
7985         }
7986
7987         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7988         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7989                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7990         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7991                        bp->common.flash_size, bp->common.flash_size);
7992
7993         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7994         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7995                                         MISC_REG_GENERIC_CR_1 :
7996                                         MISC_REG_GENERIC_CR_0));
7997         bp->link_params.shmem_base = bp->common.shmem_base;
7998         bp->link_params.shmem2_base = bp->common.shmem2_base;
7999         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8000                        bp->common.shmem_base, bp->common.shmem2_base);
8001
8002         if (!bp->common.shmem_base) {
8003                 BNX2X_DEV_INFO("MCP not active\n");
8004                 bp->flags |= NO_MCP_FLAG;
8005                 return;
8006         }
8007
8008         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8009         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8010                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8011                 BNX2X_ERR("BAD MCP validity signature\n");
8012
8013         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8014         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8015
8016         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8017                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8018                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8019
8020         bp->link_params.feature_config_flags = 0;
8021         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8022         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8023                 bp->link_params.feature_config_flags |=
8024                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8025         else
8026                 bp->link_params.feature_config_flags &=
8027                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8028
8029         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8030         bp->common.bc_ver = val;
8031         BNX2X_DEV_INFO("bc_ver %X\n", val);
8032         if (val < BNX2X_BC_VER) {
8033                 /* for now only warn;
8034                  * later we might need to enforce this */
8035                 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
8036                           "please upgrade BC\n", BNX2X_BC_VER, val);
8037         }
8038         bp->link_params.feature_config_flags |=
8039                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
8040                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8041
8042         bp->link_params.feature_config_flags |=
8043                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
8044                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
8045
8046         if (BP_E1HVN(bp) == 0) {
8047                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8048                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8049         } else {
8050                 /* no WOL capability for E1HVN != 0 */
8051                 bp->flags |= NO_WOL_FLAG;
8052         }
8053         BNX2X_DEV_INFO("%sWoL capable\n",
8054                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8055
8056         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8057         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8058         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8059         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8060
8061         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
8062                  val, val2, val3, val4);
8063 }
8064
8065 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
8066 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
8067
8068 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8069 {
8070         int pfid = BP_FUNC(bp);
8071         int vn = BP_E1HVN(bp);
8072         int igu_sb_id;
8073         u32 val;
8074         u8 fid;
8075
8076         bp->igu_base_sb = 0xff;
8077         bp->igu_sb_cnt = 0;
8078         if (CHIP_INT_MODE_IS_BC(bp)) {
8079                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8080                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8081
8082                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8083                         FP_SB_MAX_E1x;
8084
8085                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
8086                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8087
8088                 return;
8089         }
8090
8091         /* IGU in normal mode - read CAM */
8092         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8093              igu_sb_id++) {
8094                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8095                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8096                         continue;
8097                 fid = IGU_FID(val);
8098                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
8099                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8100                                 continue;
8101                         if (IGU_VEC(val) == 0)
8102                                 /* default status block */
8103                                 bp->igu_dsb_id = igu_sb_id;
8104                         else {
8105                                 if (bp->igu_base_sb == 0xff)
8106                                         bp->igu_base_sb = igu_sb_id;
8107                                 bp->igu_sb_cnt++;
8108                         }
8109                 }
8110         }
8111         bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8112                                    NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8113         if (bp->igu_sb_cnt == 0)
8114                 BNX2X_ERR("CAM configuration error\n");
8115 }
8116
8117 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8118                                                     u32 switch_cfg)
8119 {
8120         int cfg_size = 0, idx, port = BP_PORT(bp);
8121
8122         /* Aggregation of supported attributes of all external phys */
8123         bp->port.supported[0] = 0;
8124         bp->port.supported[1] = 0;
8125         switch (bp->link_params.num_phys) {
8126         case 1:
8127                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8128                 cfg_size = 1;
8129                 break;
8130         case 2:
8131                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8132                 cfg_size = 1;
8133                 break;
8134         case 3:
8135                 if (bp->link_params.multi_phy_config &
8136                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8137                         bp->port.supported[1] =
8138                                 bp->link_params.phy[EXT_PHY1].supported;
8139                         bp->port.supported[0] =
8140                                 bp->link_params.phy[EXT_PHY2].supported;
8141                 } else {
8142                         bp->port.supported[0] =
8143                                 bp->link_params.phy[EXT_PHY1].supported;
8144                         bp->port.supported[1] =
8145                                 bp->link_params.phy[EXT_PHY2].supported;
8146                 }
8147                 cfg_size = 2;
8148                 break;
8149         }
8150
8151         if (!(bp->port.supported[0] || bp->port.supported[1])) {
8152                 BNX2X_ERR("NVRAM config error. BAD phy config. "
8153                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
8154                            SHMEM_RD(bp,
8155                            dev_info.port_hw_config[port].external_phy_config),
8156                            SHMEM_RD(bp,
8157                            dev_info.port_hw_config[port].external_phy_config2));
8158                         return;
8159         }
8160
8161         switch (switch_cfg) {
8162         case SWITCH_CFG_1G:
8163                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8164                                            port*0x10);
8165                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8166                 break;
8167
8168         case SWITCH_CFG_10G:
8169                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8170                                            port*0x18);
8171                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8172                 break;
8173
8174         default:
8175                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8176                           bp->port.link_config[0]);
8177                 return;
8178         }
8179         /* mask what we support according to speed_cap_mask per configuration */
8180         for (idx = 0; idx < cfg_size; idx++) {
8181                 if (!(bp->link_params.speed_cap_mask[idx] &
8182                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8183                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
8184
8185                 if (!(bp->link_params.speed_cap_mask[idx] &
8186                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8187                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
8188
8189                 if (!(bp->link_params.speed_cap_mask[idx] &
8190                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8191                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
8192
8193                 if (!(bp->link_params.speed_cap_mask[idx] &
8194                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8195                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
8196
8197                 if (!(bp->link_params.speed_cap_mask[idx] &
8198                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8199                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
8200                                                      SUPPORTED_1000baseT_Full);
8201
8202                 if (!(bp->link_params.speed_cap_mask[idx] &
8203                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8204                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
8205
8206                 if (!(bp->link_params.speed_cap_mask[idx] &
8207                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8208                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8209
8210         }
8211
8212         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8213                        bp->port.supported[1]);
8214 }
8215
8216 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8217 {
8218         u32 link_config, idx, cfg_size = 0;
8219         bp->port.advertising[0] = 0;
8220         bp->port.advertising[1] = 0;
8221         switch (bp->link_params.num_phys) {
8222         case 1:
8223         case 2:
8224                 cfg_size = 1;
8225                 break;
8226         case 3:
8227                 cfg_size = 2;
8228                 break;
8229         }
8230         for (idx = 0; idx < cfg_size; idx++) {
8231                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8232                 link_config = bp->port.link_config[idx];
8233                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8234                 case PORT_FEATURE_LINK_SPEED_AUTO:
8235                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8236                                 bp->link_params.req_line_speed[idx] =
8237                                         SPEED_AUTO_NEG;
8238                                 bp->port.advertising[idx] |=
8239                                         bp->port.supported[idx];
8240                         } else {
8241                                 /* force 10G, no AN */
8242                                 bp->link_params.req_line_speed[idx] =
8243                                         SPEED_10000;
8244                                 bp->port.advertising[idx] |=
8245                                         (ADVERTISED_10000baseT_Full |
8246                                          ADVERTISED_FIBRE);
8247                                 continue;
8248                         }
8249                         break;
8250
8251                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8252                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8253                                 bp->link_params.req_line_speed[idx] =
8254                                         SPEED_10;
8255                                 bp->port.advertising[idx] |=
8256                                         (ADVERTISED_10baseT_Full |
8257                                          ADVERTISED_TP);
8258                         } else {
8259                                 BNX2X_ERROR("NVRAM config error. "
8260                                             "Invalid link_config 0x%x"
8261                                             "  speed_cap_mask 0x%x\n",
8262                                             link_config,
8263                                     bp->link_params.speed_cap_mask[idx]);
8264                                 return;
8265                         }
8266                         break;
8267
8268                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8269                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8270                                 bp->link_params.req_line_speed[idx] =
8271                                         SPEED_10;
8272                                 bp->link_params.req_duplex[idx] =
8273                                         DUPLEX_HALF;
8274                                 bp->port.advertising[idx] |=
8275                                         (ADVERTISED_10baseT_Half |
8276                                          ADVERTISED_TP);
8277                         } else {
8278                                 BNX2X_ERROR("NVRAM config error. "
8279                                             "Invalid link_config 0x%x"
8280                                             "  speed_cap_mask 0x%x\n",
8281                                             link_config,
8282                                           bp->link_params.speed_cap_mask[idx]);
8283                                 return;
8284                         }
8285                         break;
8286
8287                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8288                         if (bp->port.supported[idx] &
8289                             SUPPORTED_100baseT_Full) {
8290                                 bp->link_params.req_line_speed[idx] =
8291                                         SPEED_100;
8292                                 bp->port.advertising[idx] |=
8293                                         (ADVERTISED_100baseT_Full |
8294                                          ADVERTISED_TP);
8295                         } else {
8296                                 BNX2X_ERROR("NVRAM config error. "
8297                                             "Invalid link_config 0x%x"
8298                                             "  speed_cap_mask 0x%x\n",
8299                                             link_config,
8300                                           bp->link_params.speed_cap_mask[idx]);
8301                                 return;
8302                         }
8303                         break;
8304
8305                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8306                         if (bp->port.supported[idx] &
8307                             SUPPORTED_100baseT_Half) {
8308                                 bp->link_params.req_line_speed[idx] =
8309                                                                 SPEED_100;
8310                                 bp->link_params.req_duplex[idx] =
8311                                                                 DUPLEX_HALF;
8312                                 bp->port.advertising[idx] |=
8313                                         (ADVERTISED_100baseT_Half |
8314                                          ADVERTISED_TP);
8315                         } else {
8316                                 BNX2X_ERROR("NVRAM config error. "
8317                                     "Invalid link_config 0x%x"
8318                                     "  speed_cap_mask 0x%x\n",
8319                                     link_config,
8320                                     bp->link_params.speed_cap_mask[idx]);
8321                                 return;
8322                         }
8323                         break;
8324
8325                 case PORT_FEATURE_LINK_SPEED_1G:
8326                         if (bp->port.supported[idx] &
8327                             SUPPORTED_1000baseT_Full) {
8328                                 bp->link_params.req_line_speed[idx] =
8329                                         SPEED_1000;
8330                                 bp->port.advertising[idx] |=
8331                                         (ADVERTISED_1000baseT_Full |
8332                                          ADVERTISED_TP);
8333                         } else {
8334                                 BNX2X_ERROR("NVRAM config error. "
8335                                     "Invalid link_config 0x%x"
8336                                     "  speed_cap_mask 0x%x\n",
8337                                     link_config,
8338                                     bp->link_params.speed_cap_mask[idx]);
8339                                 return;
8340                         }
8341                         break;
8342
8343                 case PORT_FEATURE_LINK_SPEED_2_5G:
8344                         if (bp->port.supported[idx] &
8345                             SUPPORTED_2500baseX_Full) {
8346                                 bp->link_params.req_line_speed[idx] =
8347                                         SPEED_2500;
8348                                 bp->port.advertising[idx] |=
8349                                         (ADVERTISED_2500baseX_Full |
8350                                                 ADVERTISED_TP);
8351                         } else {
8352                                 BNX2X_ERROR("NVRAM config error. "
8353                                     "Invalid link_config 0x%x"
8354                                     "  speed_cap_mask 0x%x\n",
8355                                     link_config,
8356                                     bp->link_params.speed_cap_mask[idx]);
8357                                 return;
8358                         }
8359                         break;
8360
8361                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8362                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8363                 case PORT_FEATURE_LINK_SPEED_10G_KR:
8364                         if (bp->port.supported[idx] &
8365                             SUPPORTED_10000baseT_Full) {
8366                                 bp->link_params.req_line_speed[idx] =
8367                                         SPEED_10000;
8368                                 bp->port.advertising[idx] |=
8369                                         (ADVERTISED_10000baseT_Full |
8370                                                 ADVERTISED_FIBRE);
8371                         } else {
8372                                 BNX2X_ERROR("NVRAM config error. "
8373                                     "Invalid link_config 0x%x"
8374                                     "  speed_cap_mask 0x%x\n",
8375                                     link_config,
8376                                     bp->link_params.speed_cap_mask[idx]);
8377                                 return;
8378                         }
8379                         break;
8380
8381                 default:
8382                         BNX2X_ERROR("NVRAM config error. "
8383                                     "BAD link speed link_config 0x%x\n",
8384                                     link_config);
8385                         bp->link_params.req_line_speed[idx] =
8386                                 SPEED_AUTO_NEG;
8387                         bp->port.advertising[idx] =
8388                                 bp->port.supported[idx];
8389                         break;
8390                 }
8391
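                /* FLOW_CTRL_AUTO is only meaningful when the PHY can
                 * autonegotiate; otherwise fall back to no flow control.
                 */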
8392                 bp->link_params.req_flow_ctrl[idx] = (link_config &
8393                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8394                 if ((bp->link_params.req_flow_ctrl[idx] ==
8395                      BNX2X_FLOW_CTRL_AUTO) &&
8396                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8397                         bp->link_params.req_flow_ctrl[idx] =
8398                                 BNX2X_FLOW_CTRL_NONE;
8399                 }
8400
8401                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
8402                                " 0x%x advertising 0x%x\n",
8403                                bp->link_params.req_line_speed[idx],
8404                                bp->link_params.req_duplex[idx],
8405                                bp->link_params.req_flow_ctrl[idx],
8406                                bp->port.advertising[idx]);
8407         }
8408 }
8409
8410 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8411 {
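        /* Illustration (values assumed): mac_hi = 0x0011, mac_lo = 0x22334455
         * yields the buffer 00:11:22:33:44:55 -- the u16 supplies the two
         * most significant bytes and the u32 the remaining four, both
         * converted to big-endian first.
         */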
8412         mac_hi = cpu_to_be16(mac_hi);
8413         mac_lo = cpu_to_be32(mac_lo);
8414         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8415         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8416 }
8417
8418 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8419 {
8420         int port = BP_PORT(bp);
8421         u32 config;
8422         u32 ext_phy_type, ext_phy_config;
8423
8424         bp->link_params.bp = bp;
8425         bp->link_params.port = port;
8426
8427         bp->link_params.lane_config =
8428                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8429
8430         bp->link_params.speed_cap_mask[0] =
8431                 SHMEM_RD(bp,
8432                          dev_info.port_hw_config[port].speed_capability_mask);
8433         bp->link_params.speed_cap_mask[1] =
8434                 SHMEM_RD(bp,
8435                          dev_info.port_hw_config[port].speed_capability_mask2);
8436         bp->port.link_config[0] =
8437                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8438
8439         bp->port.link_config[1] =
8440                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8441
8442         bp->link_params.multi_phy_config =
8443                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8444         /* If the device is capable of WoL, set the default state according
8445          * to the HW
8446          */
8447         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8448         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8449                    (config & PORT_FEATURE_WOL_ENABLED));
8450
8451         BNX2X_DEV_INFO("lane_config 0x%08x  "
8452                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
8453                        bp->link_params.lane_config,
8454                        bp->link_params.speed_cap_mask[0],
8455                        bp->port.link_config[0]);
8456
8457         bp->link_params.switch_cfg = (bp->port.link_config[0] &
8458                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8459         bnx2x_phy_probe(&bp->link_params);
8460         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8461
8462         bnx2x_link_settings_requested(bp);
8463
8464         /*
8465          * If connected directly, work with the internal PHY, otherwise, work
8466          * with the external PHY
8467          */
8468         ext_phy_config =
8469                 SHMEM_RD(bp,
8470                          dev_info.port_hw_config[port].external_phy_config);
8471         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8472         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8473                 bp->mdio.prtad = bp->port.phy_addr;
8474
8475         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8476                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8477                 bp->mdio.prtad =
8478                         XGXS_EXT_PHY_ADDR(ext_phy_config);
8479
8480         /*
8481          * Check if a HW lock is required to access the MDC/MDIO bus to
8482          * the PHY(s). In MF mode it is set to cover the self-test cases.
8483          */
8484         if (IS_MF(bp))
8485                 bp->port.need_hw_lock = 1;
8486         else
8487                 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8488                                                         bp->common.shmem_base,
8489                                                         bp->common.shmem2_base);
8490 }
8491
8492 #ifdef BCM_CNIC
8493 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8494 {
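        /* The license values in shmem appear XOR-obfuscated with
         * FW_ENCODE_32BIT_PATTERN; XOR-ing once more recovers the plain
         * connection counts (an assumption based on this usage).
         */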
8495         u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8496                                 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8497         u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8498                                 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8499
8500         /* Get the number of maximum allowed iSCSI and FCoE connections */
8501         bp->cnic_eth_dev.max_iscsi_conn =
8502                 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8503                 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8504
8505         bp->cnic_eth_dev.max_fcoe_conn =
8506                 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8507                 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8508
8509         BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8510                        bp->cnic_eth_dev.max_iscsi_conn,
8511                        bp->cnic_eth_dev.max_fcoe_conn);
8512
8513         /* If the maximum allowed number of connections is zero -
8514          * disable the feature.
8515          */
8516         if (!bp->cnic_eth_dev.max_iscsi_conn)
8517                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8518
8519         if (!bp->cnic_eth_dev.max_fcoe_conn)
8520                 bp->flags |= NO_FCOE_FLAG;
8521 }
8522 #endif
8523
8524 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8525 {
8526         u32 val, val2;
8527         int func = BP_ABS_FUNC(bp);
8528         int port = BP_PORT(bp);
8529 #ifdef BCM_CNIC
8530         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8531         u8 *fip_mac = bp->fip_mac;
8532 #endif
8533
8534         if (BP_NOMCP(bp)) {
8535                 BNX2X_ERROR("warning: random MAC workaround active\n");
8536                 random_ether_addr(bp->dev->dev_addr);
8537         } else if (IS_MF(bp)) {
8538                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8539                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8540                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8541                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8542                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8543
8544 #ifdef BCM_CNIC
8545                 /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor
8546                  * an FCoE MAC then the appropriate feature should be disabled.
8547                  */
8548                 if (IS_MF_SI(bp)) {
8549                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8550                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8551                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
8552                                                      iscsi_mac_addr_upper);
8553                                 val = MF_CFG_RD(bp, func_ext_config[func].
8554                                                     iscsi_mac_addr_lower);
8555                                 BNX2X_DEV_INFO("Read iSCSI MAC: "
8556                                                "0x%x:0x%04x\n", val2, val);
8557                                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8558
8559                                 /* Disable iSCSI OOO if MAC configuration is
8560                                  * invalid.
8561                                  */
8562                                 if (!is_valid_ether_addr(iscsi_mac)) {
8563                                         bp->flags |= NO_ISCSI_OOO_FLAG |
8564                                                      NO_ISCSI_FLAG;
8565                                         memset(iscsi_mac, 0, ETH_ALEN);
8566                                 }
8567                         } else
8568                                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8569
8570                         if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8571                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
8572                                                      fcoe_mac_addr_upper);
8573                                 val = MF_CFG_RD(bp, func_ext_config[func].
8574                                                     fcoe_mac_addr_lower);
8575                                 BNX2X_DEV_INFO("Read FCoE MAC: "
8576                                                "0x%x:0x%04x\n", val2, val);
8577                                 bnx2x_set_mac_buf(fip_mac, val, val2);
8578
8579                                 /* Disable FCoE if MAC configuration is
8580                                  * invalid.
8581                                  */
8582                                 if (!is_valid_ether_addr(fip_mac)) {
8583                                         bp->flags |= NO_FCOE_FLAG;
8584                                         memset(bp->fip_mac, 0, ETH_ALEN);
8585                                 }
8586                         } else
8587                                 bp->flags |= NO_FCOE_FLAG;
8588                 }
8589 #endif
8590         } else {
8591                 /* in SF read MACs from port configuration */
8592                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8593                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8594                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8595
8596 #ifdef BCM_CNIC
8597                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8598                                     iscsi_mac_upper);
8599                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8600                                    iscsi_mac_lower);
8601                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8602 #endif
8603         }
8604
8605         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8606         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8607
8608 #ifdef BCM_CNIC
8609         /* Set the FCoE MAC in modes other than MF_SI */
8610         if (!CHIP_IS_E1x(bp)) {
8611                 if (IS_MF_SD(bp))
8612                         memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8613                 else if (!IS_MF(bp))
8614                         memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8615         }
8616 #endif
8617 }
8618
8619 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8620 {
8621         int /*abs*/func = BP_ABS_FUNC(bp);
8622         int vn, port;
8623         u32 val = 0;
8624         int rc = 0;
8625
8626         bnx2x_get_common_hwinfo(bp);
8627
8628         if (CHIP_IS_E1x(bp)) {
8629                 bp->common.int_block = INT_BLOCK_HC;
8630
8631                 bp->igu_dsb_id = DEF_SB_IGU_ID;
8632                 bp->igu_base_sb = 0;
8633                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8634                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8635         } else {
8636                 bp->common.int_block = INT_BLOCK_IGU;
8637                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8638                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8639                         DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8640                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8641                 } else
8642                         DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8643
8644                 bnx2x_get_igu_cam_info(bp);
8645
8646         }
8647         DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
8648                              bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8649
8650         /*
8651          * Initialize MF configuration
8652          */
8653
8654         bp->mf_ov = 0;
8655         bp->mf_mode = 0;
8656         vn = BP_E1HVN(bp);
8657         port = BP_PORT(bp);
8658
8659         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8660                 DP(NETIF_MSG_PROBE,
8661                             "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8662                             bp->common.shmem2_base, SHMEM2_RD(bp, size),
8663                             (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8664                 if (SHMEM2_HAS(bp, mf_cfg_addr))
8665                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8666                 else
8667                         bp->common.mf_cfg_base = bp->common.shmem_base +
8668                                 offsetof(struct shmem_region, func_mb) +
8669                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8670                 /*
8671                  * get mf configuration:
8672                  * 1. existence of MF configuration
8673                  * 2. MAC address must be legal (check only upper bytes)
8674                  *    for  Switch-Independent mode;
8675                  *    OVLAN must be legal for Switch-Dependent mode
8676                  * 3. SF_MODE configures specific MF mode
8677                  */
8678                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8679                         /* get mf configuration */
8680                         val = SHMEM_RD(bp,
8681                                        dev_info.shared_feature_config.config);
8682                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8683
8684                         switch (val) {
8685                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8686                                 val = MF_CFG_RD(bp, func_mf_config[func].
8687                                                 mac_upper);
8688                                 /* check for legal mac (upper bytes)*/
8689                                 if (val != 0xffff) {
8690                                         bp->mf_mode = MULTI_FUNCTION_SI;
8691                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8692                                                    func_mf_config[func].config);
8693                                 } else
8694                                         DP(NETIF_MSG_PROBE, "illegal MAC "
8695                                                             "address for SI\n");
8696                                 break;
8697                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8698                                 /* get OV configuration */
8699                                 val = MF_CFG_RD(bp,
8700                                         func_mf_config[FUNC_0].e1hov_tag);
8701                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8702
8703                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8704                                         bp->mf_mode = MULTI_FUNCTION_SD;
8705                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8706                                                 func_mf_config[func].config);
8707                                 } else
8708                                         DP(NETIF_MSG_PROBE, "illegal OV for "
8709                                                             "SD\n");
8710                                 break;
8711                         default:
8712                                 /* Unknown configuration: reset mf_config */
8713                                 bp->mf_config[vn] = 0;
8714                                 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8715                                    val);
8716                         }
8717                 }
8718
8719                 BNX2X_DEV_INFO("%s function mode\n",
8720                                IS_MF(bp) ? "multi" : "single");
8721
8722                 switch (bp->mf_mode) {
8723                 case MULTI_FUNCTION_SD:
8724                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8725                               FUNC_MF_CFG_E1HOV_TAG_MASK;
8726                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8727                                 bp->mf_ov = val;
8728                                 BNX2X_DEV_INFO("MF OV for func %d is %d"
8729                                                " (0x%04x)\n", func,
8730                                                bp->mf_ov, bp->mf_ov);
8731                         } else {
8732                                 BNX2X_ERR("No valid MF OV for func %d,"
8733                                           "  aborting\n", func);
8734                                 rc = -EPERM;
8735                         }
8736                         break;
8737                 case MULTI_FUNCTION_SI:
8738                         BNX2X_DEV_INFO("func %d is in MF "
8739                                        "switch-independent mode\n", func);
8740                         break;
8741                 default:
8742                         if (vn) {
8743                                 BNX2X_ERR("VN %d in single function mode,"
8744                                           "  aborting\n", vn);
8745                                 rc = -EPERM;
8746                         }
8747                         break;
8748                 }
8749
8750         }
8751
8752         /* adjust igu_sb_cnt to MF for E1x */
8753         if (CHIP_IS_E1x(bp) && IS_MF(bp))
8754                 bp->igu_sb_cnt /= E1HVN_MAX;
8755
8756         /*
8757          * adjust E2 sb count: to be removed when the FW supports
8758          * more than 16 L2 clients
8759          */
8760 #define MAX_L2_CLIENTS                          16
8761         if (CHIP_IS_E2(bp))
8762                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8763                                        MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8764
8765         if (!BP_NOMCP(bp)) {
8766                 bnx2x_get_port_hwinfo(bp);
8767
8768                 bp->fw_seq =
8769                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8770                          DRV_MSG_SEQ_NUMBER_MASK);
8771                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8772         }
8773
8774         /* Get MAC addresses */
8775         bnx2x_get_mac_hwinfo(bp);
8776
8777 #ifdef BCM_CNIC
8778         bnx2x_get_cnic_info(bp);
8779 #endif
8780
8781         return rc;
8782 }
8783
8784 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8785 {
8786         int cnt, i, block_end, rodi;
8787         char vpd_data[BNX2X_VPD_LEN+1];
8788         char str_id_reg[VENDOR_ID_LEN+1];
8789         char str_id_cap[VENDOR_ID_LEN+1];
8790         u8 len;
8791
8792         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8793         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8794
8795         if (cnt < BNX2X_VPD_LEN)
8796                 goto out_not_found;
8797
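        /* Walk the VPD image: locate the large-resource read-only (RO) tag,
         * then search within it for the MFR_ID keyword and, for matching
         * vendors, the VENDOR0 keyword that carries a firmware version.
         */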
8798         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8799                              PCI_VPD_LRDT_RO_DATA);
8800         if (i < 0)
8801                 goto out_not_found;
8802
8803
8804         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8805                     pci_vpd_lrdt_size(&vpd_data[i]);
8806
8807         i += PCI_VPD_LRDT_TAG_SIZE;
8808
8809         if (block_end > BNX2X_VPD_LEN)
8810                 goto out_not_found;
8811
8812         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8813                                    PCI_VPD_RO_KEYWORD_MFR_ID);
8814         if (rodi < 0)
8815                 goto out_not_found;
8816
8817         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8818
8819         if (len != VENDOR_ID_LEN)
8820                 goto out_not_found;
8821
8822         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8823
8824         /* vendor specific info */
8825         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8826         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8827         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8828             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8829
8830                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8831                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
8832                 if (rodi >= 0) {
8833                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8834
8835                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8836
8837                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8838                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8839                                 bp->fw_ver[len] = ' ';
8840                         }
8841                 }
8842                 return;
8843         }
8844 out_not_found:
8845         return;
8846 }
8847
8848 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8849 {
8850         int func;
8851         int timer_interval;
8852         int rc;
8853
8854         /* Disable interrupt handling until HW is initialized */
8855         atomic_set(&bp->intr_sem, 1);
8856         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8857
8858         mutex_init(&bp->port.phy_mutex);
8859         mutex_init(&bp->fw_mb_mutex);
8860         spin_lock_init(&bp->stats_lock);
8861 #ifdef BCM_CNIC
8862         mutex_init(&bp->cnic_mutex);
8863 #endif
8864
8865         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8866         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8867
8868         rc = bnx2x_get_hwinfo(bp);
8869
8870         if (!rc)
8871                 rc = bnx2x_alloc_mem_bp(bp);
8872
8873         bnx2x_read_fwinfo(bp);
8874
8875         func = BP_FUNC(bp);
8876
8877         /* need to reset chip if undi was active */
8878         if (!BP_NOMCP(bp))
8879                 bnx2x_undi_unload(bp);
8880
8881         if (CHIP_REV_IS_FPGA(bp))
8882                 dev_err(&bp->pdev->dev, "FPGA detected\n");
8883
8884         if (BP_NOMCP(bp) && (func == 0))
8885                 dev_err(&bp->pdev->dev, "MCP disabled, "
8886                                         "must load devices in order!\n");
8887
8888         bp->multi_mode = multi_mode;
8889         bp->int_mode = int_mode;
8890
8891         bp->dev->features |= NETIF_F_GRO;
8892
8893         /* Set TPA flags */
8894         if (disable_tpa) {
8895                 bp->flags &= ~TPA_ENABLE_FLAG;
8896                 bp->dev->features &= ~NETIF_F_LRO;
8897         } else {
8898                 bp->flags |= TPA_ENABLE_FLAG;
8899                 bp->dev->features |= NETIF_F_LRO;
8900         }
8901         bp->disable_tpa = disable_tpa;
8902
8903         if (CHIP_IS_E1(bp))
8904                 bp->dropless_fc = 0;
8905         else
8906                 bp->dropless_fc = dropless_fc;
8907
8908         bp->mrrs = mrrs;
8909
8910         bp->tx_ring_size = MAX_TX_AVAIL;
8911
8912         bp->rx_csum = 1;
8913
8914         /* make sure that the numbers are in the right granularity */
8915         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8916         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8917
8918         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8919         bp->current_interval = (poll ? poll : timer_interval);
8920
8921         init_timer(&bp->timer);
8922         bp->timer.expires = jiffies + bp->current_interval;
8923         bp->timer.data = (unsigned long) bp;
8924         bp->timer.function = bnx2x_timer;
8925
8926         bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8927         bnx2x_dcbx_init_params(bp);
8928
8929         return rc;
8930 }
8931
8932
8933 /****************************************************************************
8934 * General service functions
8935 ****************************************************************************/
8936
8937 /* called with rtnl_lock */
8938 static int bnx2x_open(struct net_device *dev)
8939 {
8940         struct bnx2x *bp = netdev_priv(dev);
8941
8942         netif_carrier_off(dev);
8943
8944         bnx2x_set_power_state(bp, PCI_D0);
8945
8946         if (!bnx2x_reset_is_done(bp)) {
8947                 do {
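                        /* do { } while (0) is used purely so that "break"
                         * can abandon the recovery attempt early.
                         */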
8948                         /* Reset the MCP mailbox sequence if there is an
8949                          * ongoing recovery
8950                          */
8951                         bp->fw_seq = 0;
8952
8953                         /* If this is the first function to load and "reset
8954                          * done" is still not cleared, recovery may be needed.
8955                          * We don't check the attention state here because it
8956                          * may already have been cleared by a "common" reset,
8957                          * but we shall proceed with "process kill" anyway.
8958                          */
8959                         if ((bnx2x_get_load_cnt(bp) == 0) &&
8960                                 bnx2x_trylock_hw_lock(bp,
8961                                 HW_LOCK_RESOURCE_RESERVED_08) &&
8962                                 (!bnx2x_leader_reset(bp))) {
8963                                 DP(NETIF_MSG_HW, "Recovered in open\n");
8964                                 break;
8965                         }
8966
8967                         bnx2x_set_power_state(bp, PCI_D3hot);
8968
8969                         printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8970                         " completed yet. Try again later. If you still see this"
8971                         " message after a few retries then a power cycle is"
8972                         " required.\n", bp->dev->name);
8973
8974                         return -EAGAIN;
8975                 } while (0);
8976         }
8977
8978         bp->recovery_state = BNX2X_RECOVERY_DONE;
8979
8980         return bnx2x_nic_load(bp, LOAD_OPEN);
8981 }
8982
8983 /* called with rtnl_lock */
8984 static int bnx2x_close(struct net_device *dev)
8985 {
8986         struct bnx2x *bp = netdev_priv(dev);
8987
8988         /* Unload the driver, release IRQs */
8989         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8990         bnx2x_set_power_state(bp, PCI_D3hot);
8991
8992         return 0;
8993 }
8994
8995 #define E1_MAX_UC_LIST  29
8996 #define E1H_MAX_UC_LIST 30
8997 #define E2_MAX_UC_LIST  14
8998 static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
8999 {
9000         if (CHIP_IS_E1(bp))
9001                 return E1_MAX_UC_LIST;
9002         else if (CHIP_IS_E1H(bp))
9003                 return E1H_MAX_UC_LIST;
9004         else
9005                 return E2_MAX_UC_LIST;
9006 }
9007
9008
9009 static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9010 {
9011         if (CHIP_IS_E1(bp))
9012                 /* CAM Entries for Port0:
9013                  *      0 - prim ETH MAC
9014                  *      1 - BCAST MAC
9015                  *      2 - iSCSI L2 ring ETH MAC
9016                  *      3-31 - UC MACs
9017                  *
9018                  * Port1 entries are allocated the same way starting from
9019                  * entry 32.
9020                  */
9021                 return 3 + 32 * BP_PORT(bp);
9022         else if (CHIP_IS_E1H(bp)) {
9023                 /* CAM Entries:
9024                  *      0-7  - prim ETH MAC for each function
9025                  *      8-15 - iSCSI L2 ring ETH MAC for each function
9026                  *      16 till 255 UC MAC lists for each function
9027                  *
9028                  * Remark: There is no FCoE support for E1H, thus FCoE related
9029                  *         MACs are not considered.
9030                  */
9031                 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9032                         bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9033         } else {
9034                 /* CAM Entries (there is a separate CAM per engine):
9035                  *      0-3  - prim ETH MAC for each function
9036                  *      4-7  - iSCSI L2 ring ETH MAC for each function
9037                  *      8-11 - FIP ucast L2 MAC for each function
9038                  *      12-15 - ALL_ENODE_MACS mcast MAC for each function
9039                  *      16 till 71 UC MAC lists for each function
9040                  */
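                /* In 4-port mode each function owns its own CAM slice, so
                 * index by function number; otherwise index by VN within
                 * the engine.
                 */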
9041                 u8 func_idx =
9042                         (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9043
9044                 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9045                         bnx2x_max_uc_list(bp) * func_idx;
9046         }
9047 }
9048
9049 /* Set the UC list without waiting: waiting implies sleeping, and
9050  * set_rx_mode() may be invoked from a non-sleepable context.
9051  *
9052  * Instead we use the same ramrod data buffer each time we need
9053  * to configure a list of addresses, and use the fact that the
9054  * list of MACs is changed in an incremental way and that the
9055  * function is called under the netif_addr_lock. A temporary
9056  * inconsistent CAM configuration (possible in case of very fast
9057  * sequence of add/del/add on the host side) will shortly be
9058  * restored by the handler of the last ramrod.
9059  */
9060 static int bnx2x_set_uc_list(struct bnx2x *bp)
9061 {
9062         int i = 0, old;
9063         struct net_device *dev = bp->dev;
9064         u8 offset = bnx2x_uc_list_cam_offset(bp);
9065         struct netdev_hw_addr *ha;
9066         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9067         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9068
9069         if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9070                 return -EINVAL;
9071
9072         netdev_for_each_uc_addr(ha, dev) {
9073                 /* copy mac */
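                /* Each 16-bit CAM word is byte-swapped so that, on a
                 * little-endian host, msb_mac_addr carries address bytes
                 * 0-1, middle bytes 2-3 and lsb bytes 4-5.
                 */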
9074                 config_cmd->config_table[i].msb_mac_addr =
9075                         swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9076                 config_cmd->config_table[i].middle_mac_addr =
9077                         swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9078                 config_cmd->config_table[i].lsb_mac_addr =
9079                         swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9080
9081                 config_cmd->config_table[i].vlan_id = 0;
9082                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9083                 config_cmd->config_table[i].clients_bit_vector =
9084                         cpu_to_le32(1 << BP_L_ID(bp));
9085
9086                 SET_FLAG(config_cmd->config_table[i].flags,
9087                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9088                         T_ETH_MAC_COMMAND_SET);
9089
9090                 DP(NETIF_MSG_IFUP,
9091                    "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9092                    config_cmd->config_table[i].msb_mac_addr,
9093                    config_cmd->config_table[i].middle_mac_addr,
9094                    config_cmd->config_table[i].lsb_mac_addr);
9095
9096                 i++;
9097
9098                 /* Set uc MAC in NIG */
9099                 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9100                                      LLH_CAM_ETH_LINE + i);
9101         }
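        /* If the previous list was longer, invalidate the leftover tail;
         * stop at the first entry that is already invalid.
         */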
9102         old = config_cmd->hdr.length;
9103         if (old > i) {
9104                 for (; i < old; i++) {
9105                         if (CAM_IS_INVALID(config_cmd->
9106                                            config_table[i])) {
9107                                 /* already invalidated */
9108                                 break;
9109                         }
9110                         /* invalidate */
9111                         SET_FLAG(config_cmd->config_table[i].flags,
9112                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9113                                 T_ETH_MAC_COMMAND_INVALIDATE);
9114                 }
9115         }
9116
9117         wmb();
9118
9119         config_cmd->hdr.length = i;
9120         config_cmd->hdr.offset = offset;
9121         config_cmd->hdr.client_id = 0xff;
9122         /* Mark that this ramrod doesn't use bp->set_mac_pending for
9123          * synchronization.
9124          */
9125         config_cmd->hdr.echo = 0;
9126
9127         mb();
9128
9129         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9130                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9131
9132 }
9133
9134 void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9135 {
9136         int i;
9137         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9138         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9139         int ramrod_flags = WAIT_RAMROD_COMMON;
9140         u8 offset = bnx2x_uc_list_cam_offset(bp);
9141         u8 max_list_size = bnx2x_max_uc_list(bp);
9142
9143         for (i = 0; i < max_list_size; i++) {
9144                 SET_FLAG(config_cmd->config_table[i].flags,
9145                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9146                         T_ETH_MAC_COMMAND_INVALIDATE);
9147                 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9148         }
9149
9150         wmb();
9151
9152         config_cmd->hdr.length = max_list_size;
9153         config_cmd->hdr.offset = offset;
9154         config_cmd->hdr.client_id = 0xff;
9155         /* We'll wait for a completion this time... */
9156         config_cmd->hdr.echo = 1;
9157
9158         bp->set_mac_pending = 1;
9159
9160         mb();
9161
9162         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9163                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9164
9165         /* Wait for a completion */
9166         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9167                                 ramrod_flags);
9168
9169 }
9170
9171 static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9172 {
9173         /* some multicasts */
9174         if (CHIP_IS_E1(bp)) {
9175                 return bnx2x_set_e1_mc_list(bp);
9176         } else { /* E1H and newer */
9177                 return bnx2x_set_e1h_mc_list(bp);
9178         }
9179 }
9180
9181 /* called with netif_tx_lock from dev_mcast.c */
9182 void bnx2x_set_rx_mode(struct net_device *dev)
9183 {
9184         struct bnx2x *bp = netdev_priv(dev);
9185         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9186
9187         if (bp->state != BNX2X_STATE_OPEN) {
9188                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9189                 return;
9190         }
9191
9192         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9193
9194         if (dev->flags & IFF_PROMISC)
9195                 rx_mode = BNX2X_RX_MODE_PROMISC;
9196         else if (dev->flags & IFF_ALLMULTI)
9197                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9198         else {
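                /* If the exact MC/UC lists cannot be programmed (e.g. more
                 * addresses than available CAM entries), fall back to a
                 * wider receive mode instead.
                 */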
9199                 /* some multicasts */
9200                 if (bnx2x_set_mc_list(bp))
9201                         rx_mode = BNX2X_RX_MODE_ALLMULTI;
9202
9203                 /* some unicasts */
9204                 if (bnx2x_set_uc_list(bp))
9205                         rx_mode = BNX2X_RX_MODE_PROMISC;
9206         }
9207
9208         bp->rx_mode = rx_mode;
9209         bnx2x_set_storm_rx_mode(bp);
9210 }
9211
9212 /* called with rtnl_lock */
9213 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
9214                            int devad, u16 addr)
9215 {
9216         struct bnx2x *bp = netdev_priv(netdev);
9217         u16 value;
9218         int rc;
9219
9220         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
9221            prtad, devad, addr);
9222
9223         /* The HW expects different devad if CL22 is used */
9224         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
9225
9226         bnx2x_acquire_phy_lock(bp);
9227         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
9228         bnx2x_release_phy_lock(bp);
9229         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
9230
9231         if (!rc)
9232                 rc = value;
9233         return rc;
9234 }
9235
9236 /* called with rtnl_lock */
9237 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
9238                             u16 addr, u16 value)
9239 {
9240         struct bnx2x *bp = netdev_priv(netdev);
9241         int rc;
9242
9243         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
9244                            " value 0x%x\n", prtad, devad, addr, value);
9245
9246         /* The HW expects different devad if CL22 is used */
9247         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
9248
9249         bnx2x_acquire_phy_lock(bp);
9250         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
9251         bnx2x_release_phy_lock(bp);
9252         return rc;
9253 }
9254
9255 /* called with rtnl_lock */
9256 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9257 {
9258         struct bnx2x *bp = netdev_priv(dev);
9259         struct mii_ioctl_data *mdio = if_mii(ifr);
9260
9261         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9262            mdio->phy_id, mdio->reg_num, mdio->val_in);
9263
9264         if (!netif_running(dev))
9265                 return -EAGAIN;
9266
9267         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
9268 }
9269
9270 #ifdef CONFIG_NET_POLL_CONTROLLER
9271 static void poll_bnx2x(struct net_device *dev)
9272 {
9273         struct bnx2x *bp = netdev_priv(dev);
9274
9275         disable_irq(bp->pdev->irq);
9276         bnx2x_interrupt(bp->pdev->irq, dev);
9277         enable_irq(bp->pdev->irq);
9278 }
9279 #endif
9280
9281 static const struct net_device_ops bnx2x_netdev_ops = {
9282         .ndo_open               = bnx2x_open,
9283         .ndo_stop               = bnx2x_close,
9284         .ndo_start_xmit         = bnx2x_start_xmit,
9285         .ndo_select_queue       = bnx2x_select_queue,
9286         .ndo_set_rx_mode        = bnx2x_set_rx_mode,
9287         .ndo_set_mac_address    = bnx2x_change_mac_addr,
9288         .ndo_validate_addr      = eth_validate_addr,
9289         .ndo_do_ioctl           = bnx2x_ioctl,
9290         .ndo_change_mtu         = bnx2x_change_mtu,
9291         .ndo_tx_timeout         = bnx2x_tx_timeout,
9292 #ifdef CONFIG_NET_POLL_CONTROLLER
9293         .ndo_poll_controller    = poll_bnx2x,
9294 #endif
9295 };
9296
9297 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9298                                     struct net_device *dev)
9299 {
9300         struct bnx2x *bp;
9301         int rc;
9302
9303         SET_NETDEV_DEV(dev, &pdev->dev);
9304         bp = netdev_priv(dev);
9305
9306         bp->dev = dev;
9307         bp->pdev = pdev;
9308         bp->flags = 0;
9309         bp->pf_num = PCI_FUNC(pdev->devfn);
9310
9311         rc = pci_enable_device(pdev);
9312         if (rc) {
9313                 dev_err(&bp->pdev->dev,
9314                         "Cannot enable PCI device, aborting\n");
9315                 goto err_out;
9316         }
9317
9318         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9319                 dev_err(&bp->pdev->dev,
9320                         "Cannot find PCI device base address, aborting\n");
9321                 rc = -ENODEV;
9322                 goto err_out_disable;
9323         }
9324
9325         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9326                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9327                        " base address, aborting\n");
9328                 rc = -ENODEV;
9329                 goto err_out_disable;
9330         }
9331
9332         if (atomic_read(&pdev->enable_cnt) == 1) {
9333                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9334                 if (rc) {
9335                         dev_err(&bp->pdev->dev,
9336                                 "Cannot obtain PCI resources, aborting\n");
9337                         goto err_out_disable;
9338                 }
9339
9340                 pci_set_master(pdev);
9341                 pci_save_state(pdev);
9342         }
9343
9344         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9345         if (bp->pm_cap == 0) {
9346                 dev_err(&bp->pdev->dev,
9347                         "Cannot find power management capability, aborting\n");
9348                 rc = -EIO;
9349                 goto err_out_release;
9350         }
9351
9352         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9353         if (bp->pcie_cap == 0) {
9354                 dev_err(&bp->pdev->dev,
9355                         "Cannot find PCI Express capability, aborting\n");
9356                 rc = -EIO;
9357                 goto err_out_release;
9358         }
9359
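        /* Prefer a 64-bit DMA mask (with a matching coherent mask); fall
         * back to 32-bit if the platform cannot address 64 bits.
         */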
9360         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
9361                 bp->flags |= USING_DAC_FLAG;
9362                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
9363                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9364                                " failed, aborting\n");
9365                         rc = -EIO;
9366                         goto err_out_release;
9367                 }
9368
9369         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9370                 dev_err(&bp->pdev->dev,
9371                         "System does not support DMA, aborting\n");
9372                 rc = -EIO;
9373                 goto err_out_release;
9374         }
9375
9376         dev->mem_start = pci_resource_start(pdev, 0);
9377         dev->base_addr = dev->mem_start;
9378         dev->mem_end = pci_resource_end(pdev, 0);
9379
9380         dev->irq = pdev->irq;
9381
9382         bp->regview = pci_ioremap_bar(pdev, 0);
9383         if (!bp->regview) {
9384                 dev_err(&bp->pdev->dev,
9385                         "Cannot map register space, aborting\n");
9386                 rc = -ENOMEM;
9387                 goto err_out_release;
9388         }
9389
9390         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9391                                         min_t(u64, BNX2X_DB_SIZE(bp),
9392                                               pci_resource_len(pdev, 2)));
9393         if (!bp->doorbells) {
9394                 dev_err(&bp->pdev->dev,
9395                         "Cannot map doorbell space, aborting\n");
9396                 rc = -ENOMEM;
9397                 goto err_out_unmap;
9398         }
9399
9400         bnx2x_set_power_state(bp, PCI_D0);
9401
9402         /* clean indirect addresses */
9403         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9404                                PCICFG_VENDOR_ID_OFFSET);
9405         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9406         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9407         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9408         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
9409
9410         /* Reset the load counter */
9411         bnx2x_clear_load_cnt(bp);
9412
9413         dev->watchdog_timeo = TX_TIMEOUT;
9414
9415         dev->netdev_ops = &bnx2x_netdev_ops;
9416         bnx2x_set_ethtool_ops(dev);
9417         dev->features |= NETIF_F_SG;
9418         dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9419         if (bp->flags & USING_DAC_FLAG)
9420                 dev->features |= NETIF_F_HIGHDMA;
9421         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9422         dev->features |= NETIF_F_TSO6;
9423         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9424
9425         dev->vlan_features |= NETIF_F_SG;
9426         dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9427         if (bp->flags & USING_DAC_FLAG)
9428                 dev->vlan_features |= NETIF_F_HIGHDMA;
9429         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9430         dev->vlan_features |= NETIF_F_TSO6;
9431
9432 #ifdef BCM_DCB
9433         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9434 #endif
9435
9436         /* get_port_hwinfo() will set prtad and mmds properly */
9437         bp->mdio.prtad = MDIO_PRTAD_NONE;
9438         bp->mdio.mmds = 0;
9439         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9440         bp->mdio.dev = dev;
9441         bp->mdio.mdio_read = bnx2x_mdio_read;
9442         bp->mdio.mdio_write = bnx2x_mdio_write;
9443
9444         return 0;
9445
9446 err_out_unmap:
9447         if (bp->regview) {
9448                 iounmap(bp->regview);
9449                 bp->regview = NULL;
9450         }
9451         if (bp->doorbells) {
9452                 iounmap(bp->doorbells);
9453                 bp->doorbells = NULL;
9454         }
9455
9456 err_out_release:
9457         if (atomic_read(&pdev->enable_cnt) == 1)
9458                 pci_release_regions(pdev);
9459
9460 err_out_disable:
9461         pci_disable_device(pdev);
9462         pci_set_drvdata(pdev, NULL);
9463
9464 err_out:
9465         return rc;
9466 }
9467
9468 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9469                                                  int *width, int *speed)
9470 {
9471         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9472
9473         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9474
9475         /* returned value: 1 = 2.5GHz, 2 = 5GHz */
9476         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9477 }
9478
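/* Sanity-check the firmware file before any of it is used.  The file starts
 * with a struct bnx2x_fw_file_hdr, which can equally be viewed as an array
 * of {offset, len} section descriptors (both fields big endian).  The checks
 * below verify that every section, every init_ops offset and the embedded
 * version string are consistent with the blob we were handed, so a truncated
 * or mismatched file is rejected before it is parsed.
 */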
9479 static int bnx2x_check_firmware(struct bnx2x *bp)
9480 {
9481         const struct firmware *firmware = bp->firmware;
9482         struct bnx2x_fw_file_hdr *fw_hdr;
9483         struct bnx2x_fw_file_section *sections;
9484         u32 offset, len, num_ops;
9485         u16 *ops_offsets;
9486         int i;
9487         const u8 *fw_ver;
9488
9489         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9490                 return -EINVAL;
9491
9492         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9493         sections = (struct bnx2x_fw_file_section *)fw_hdr;
9494
9495         /* Make sure none of the offsets and sizes make us read beyond
9496          * the end of the firmware data */
9497         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9498                 offset = be32_to_cpu(sections[i].offset);
9499                 len = be32_to_cpu(sections[i].len);
9500                 if (offset + len > firmware->size) {
9501                         dev_err(&bp->pdev->dev,
9502                                 "Section %d length is out of bounds\n", i);
9503                         return -EINVAL;
9504                 }
9505         }
9506
9507         /* Likewise for the init_ops offsets */
9508         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9509         ops_offsets = (u16 *)(firmware->data + offset);
9510         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9511
9512         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9513                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
9514                         dev_err(&bp->pdev->dev,
9515                                 "Section offset %d is out of bounds\n", i);
9516                         return -EINVAL;
9517                 }
9518         }
9519
9520         /* Check FW version */
9521         offset = be32_to_cpu(fw_hdr->fw_version.offset);
9522         fw_ver = firmware->data + offset;
9523         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9524             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9525             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9526             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
9527                 dev_err(&bp->pdev->dev,
9528                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9529                        fw_ver[0], fw_ver[1], fw_ver[2],
9530                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9531                        BCM_5710_FW_MINOR_VERSION,
9532                        BCM_5710_FW_REVISION_VERSION,
9533                        BCM_5710_FW_ENGINEERING_VERSION);
9534                 return -EINVAL;
9535         }
9536
9537         return 0;
9538 }
9539
9540 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9541 {
9542         const __be32 *source = (const __be32 *)_source;
9543         u32 *target = (u32 *)_target;
9544         u32 i;
9545
9546         for (i = 0; i < n/4; i++)
9547                 target[i] = be32_to_cpu(source[i]);
9548 }
9549
9550 /*
9551  * Ops array is stored in the following format:
9552  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9553  */
9554 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
9555 {
9556         const __be32 *source = (const __be32 *)_source;
9557         struct raw_op *target = (struct raw_op *)_target;
9558         u32 i, j, tmp;
9559
9560         for (i = 0, j = 0; i < n/8; i++, j += 2) {
9561                 tmp = be32_to_cpu(source[j]);
9562                 target[i].op = (tmp >> 24) & 0xff;
9563                 target[i].offset = tmp & 0xffffff;
9564                 target[i].raw_data = be32_to_cpu(source[j + 1]);
9565         }
9566 }
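
/* A worked example of the unpacking above (illustrative values only):
 * the 8-byte big-endian record 0x02AABBCC 0x11223344 becomes
 * op = 0x02, offset = 0xAABBCC, raw_data = 0x11223344.
 */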
9567
9568 /*
9569  * IRO array is stored in the following format:
9570  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
9571  */
9572 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9573 {
9574         const __be32 *source = (const __be32 *)_source;
9575         struct iro *target = (struct iro *)_target;
9576         u32 i, j, tmp;
9577
9578         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9579                 target[i].base = be32_to_cpu(source[j]);
9580                 j++;
9581                 tmp = be32_to_cpu(source[j]);
9582                 target[i].m1 = (tmp >> 16) & 0xffff;
9583                 target[i].m2 = tmp & 0xffff;
9584                 j++;
9585                 tmp = be32_to_cpu(source[j]);
9586                 target[i].m3 = (tmp >> 16) & 0xffff;
9587                 target[i].size = tmp & 0xffff;
9588                 j++;
9589         }
9590 }
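
/* Each IRO entry consumes three big-endian dwords: the first carries base,
 * the second packs m1 (upper 16 bits) and m2 (lower 16 bits), and the third
 * packs m3 (upper 16 bits) and size (lower 16 bits).  Illustratively, the
 * dwords 0x00001000 0x00020003 0x00040010 yield base = 0x1000, m1 = 2,
 * m2 = 3, m3 = 4, size = 0x10.
 */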
9591
9592 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9593 {
9594         const __be16 *source = (const __be16 *)_source;
9595         u16 *target = (u16 *)_target;
9596         u32 i;
9597
9598         for (i = 0; i < n/2; i++)
9599                 target[i] = be16_to_cpu(source[i]);
9600 }
9601
9602 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
9603 do {                                                                    \
9604         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
9605         bp->arr = kmalloc(len, GFP_KERNEL);                             \
9606         if (!bp->arr) {                                                 \
9607                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9608                 goto lbl;                                               \
9609         }                                                               \
9610         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
9611              (u8 *)bp->arr, len);                                       \
9612 } while (0)
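
/* Note that BNX2X_ALLOC_AND_SET() is deliberately not a function: it relies
 * on 'bp' and 'fw_hdr' being in scope at the expansion site and on 'lbl'
 * naming a local error label, so it is only meant for use inside
 * bnx2x_init_firmware() below.  For instance,
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * allocates bp->init_data, byte-swaps the blob into it and jumps to
 * request_firmware_exit on allocation failure.
 */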
9613
9614 int bnx2x_init_firmware(struct bnx2x *bp)
9615 {
9616         const char *fw_file_name;
9617         struct bnx2x_fw_file_hdr *fw_hdr;
9618         int rc;
9619
9620         if (CHIP_IS_E1(bp))
9621                 fw_file_name = FW_FILE_NAME_E1;
9622         else if (CHIP_IS_E1H(bp))
9623                 fw_file_name = FW_FILE_NAME_E1H;
9624         else if (CHIP_IS_E2(bp))
9625                 fw_file_name = FW_FILE_NAME_E2;
9626         else {
9627                 BNX2X_ERR("Unsupported chip revision\n");
9628                 return -EINVAL;
9629         }
9630
9631         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
9632
9633         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
9634         if (rc) {
9635                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
9636                 goto request_firmware_exit;
9637         }
9638
9639         rc = bnx2x_check_firmware(bp);
9640         if (rc) {
9641                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
9642                 goto request_firmware_exit;
9643         }
9644
9645         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9646
9647         /* Initialize the pointers to the init arrays */
9648         /* Blob */
9649         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9650
9651         /* Opcodes */
9652         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9653
9654         /* Offsets */
9655         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9656                             be16_to_cpu_n);
9657
9658         /* STORMs firmware */
9659         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9660                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9661         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
9662                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9663         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9664                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9665         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
9666                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
9667         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9668                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9669         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
9670                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9671         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9672                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9673         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
9674                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
9675         /* IRO */
9676         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
9677
9678         return 0;
9679
9680 iro_alloc_err:
9681         kfree(bp->init_ops_offsets);
9682 init_offsets_alloc_err:
9683         kfree(bp->init_ops);
9684 init_ops_alloc_err:
9685         kfree(bp->init_data);
9686 request_firmware_exit:
9687         release_firmware(bp->firmware);
9688
9689         return rc;
9690 }
9691
9692 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9693 {
9694         int cid_count = L2_FP_COUNT(l2_cid_count);
9695
9696 #ifdef BCM_CNIC
9697         cid_count += CNIC_CID_MAX;
9698 #endif
9699         return roundup(cid_count, QM_CID_ROUND);
9700 }
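
/* Example: if L2_FP_COUNT() yielded 19 CIDs and QM_CID_ROUND were 16
 * (illustrative values, not the real constants), roundup() above would
 * return 32, i.e. the count is padded to the next QM_CID_ROUND boundary.
 */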
9701
9702 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9703                                     const struct pci_device_id *ent)
9704 {
9705         struct net_device *dev = NULL;
9706         struct bnx2x *bp;
9707         int pcie_width, pcie_speed;
9708         int rc, cid_count;
9709
9710         switch (ent->driver_data) {
9711         case BCM57710:
9712         case BCM57711:
9713         case BCM57711E:
9714                 cid_count = FP_SB_MAX_E1x;
9715                 break;
9716
9717         case BCM57712:
9718         case BCM57712E:
9719                 cid_count = FP_SB_MAX_E2;
9720                 break;
9721
9722         default:
9723                 pr_err("Unknown board_type (%ld), aborting\n",
9724                            ent->driver_data);
9725                 return -ENODEV;
9726         }
9727
9728         cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
9729
9730         /* dev zeroed in alloc_etherdev_mq() */
9731         dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9732         if (!dev) {
9733                 dev_err(&pdev->dev, "Cannot allocate net device\n");
9734                 return -ENOMEM;
9735         }
9736
9737         bp = netdev_priv(dev);
9738         bp->msg_enable = debug;
9739
9740         pci_set_drvdata(pdev, dev);
9741
9742         bp->l2_cid_count = cid_count;
9743
9744         rc = bnx2x_init_dev(pdev, dev);
9745         if (rc < 0) {
9746                 free_netdev(dev);
9747                 return rc;
9748         }
9749
9750         rc = bnx2x_init_bp(bp);
9751         if (rc)
9752                 goto init_one_exit;
9753
9754         /* calc qm_cid_count */
9755         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9756
9757 #ifdef BCM_CNIC
9758         /* disable FCoE L2 queue for E1x */
9759         if (CHIP_IS_E1x(bp))
9760                 bp->flags |= NO_FCOE_FLAG;
9761
9762 #endif
9763
9764         /* Configure interrupt mode: try to enable MSI-X/MSI if
9765          * needed, set bp->num_queues appropriately.
9766          */
9767         bnx2x_set_int_mode(bp);
9768
9769         /* Add all NAPI objects */
9770         bnx2x_add_all_napi(bp);
9771
9772         rc = register_netdev(dev);
9773         if (rc) {
9774                 dev_err(&pdev->dev, "Cannot register net device\n");
9775                 goto init_one_exit;
9776         }
9777
9778 #ifdef BCM_CNIC
9779         if (!NO_FCOE(bp)) {
9780                 /* Add storage MAC address */
9781                 rtnl_lock();
9782                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9783                 rtnl_unlock();
9784         }
9785 #endif
9786
9787         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9788
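        /* Presumably the PCICFG link-speed encoding differs on E2, where 1
         * rather than 2 denotes a 5GHz (Gen2) link; hence the chip-type
         * check when formatting the speed below.
         */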
9789         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9790                " IRQ %d, ", board_info[ent->driver_data].name,
9791                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9792                pcie_width,
9793                ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9794                  (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9795                                                 "5GHz (Gen2)" : "2.5GHz",
9796                dev->base_addr, bp->pdev->irq);
9797         pr_cont("node addr %pM\n", dev->dev_addr);
9798
9799         return 0;
9800
9801 init_one_exit:
9802         if (bp->regview)
9803                 iounmap(bp->regview);
9804
9805         if (bp->doorbells)
9806                 iounmap(bp->doorbells);
9807
9808         free_netdev(dev);
9809
9810         if (atomic_read(&pdev->enable_cnt) == 1)
9811                 pci_release_regions(pdev);
9812
9813         pci_disable_device(pdev);
9814         pci_set_drvdata(pdev, NULL);
9815
9816         return rc;
9817 }
9818
9819 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9820 {
9821         struct net_device *dev = pci_get_drvdata(pdev);
9822         struct bnx2x *bp;
9823
9824         if (!dev) {
9825                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9826                 return;
9827         }
9828         bp = netdev_priv(dev);
9829
9830 #ifdef BCM_CNIC
9831         /* Delete storage MAC address */
9832         if (!NO_FCOE(bp)) {
9833                 rtnl_lock();
9834                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9835                 rtnl_unlock();
9836         }
9837 #endif
9838
9839         unregister_netdev(dev);
9840
9841         /* Delete all NAPI objects */
9842         bnx2x_del_all_napi(bp);
9843
9844         /* Power on: we can't let PCI layer write to us while we are in D3 */
9845         bnx2x_set_power_state(bp, PCI_D0);
9846
9847         /* Disable MSI/MSI-X */
9848         bnx2x_disable_msi(bp);
9849
9850         /* Power off */
9851         bnx2x_set_power_state(bp, PCI_D3hot);
9852
9853         /* Make sure RESET task is not scheduled before continuing */
9854         cancel_delayed_work_sync(&bp->reset_task);
9855
9856         if (bp->regview)
9857                 iounmap(bp->regview);
9858
9859         if (bp->doorbells)
9860                 iounmap(bp->doorbells);
9861
9862         bnx2x_free_mem_bp(bp);
9863
9864         free_netdev(dev);
9865
9866         if (atomic_read(&pdev->enable_cnt) == 1)
9867                 pci_release_regions(pdev);
9868
9869         pci_disable_device(pdev);
9870         pci_set_drvdata(pdev, NULL);
9871 }
9872
9873 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9874 {
9875         int i;
9876
9877         bp->state = BNX2X_STATE_ERROR;
9878
9879         bp->rx_mode = BNX2X_RX_MODE_NONE;
9880
9881         bnx2x_netif_stop(bp, 0);
9882         netif_carrier_off(bp->dev);
9883
9884         del_timer_sync(&bp->timer);
9885         bp->stats_state = STATS_STATE_DISABLED;
9886         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9887
9888         /* Release IRQs */
9889         bnx2x_free_irq(bp);
9890
9891         /* Free SKBs, SGEs, TPA pool and driver internals */
9892         bnx2x_free_skbs(bp);
9893
9894         for_each_rx_queue(bp, i)
9895                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9896
9897         bnx2x_free_mem(bp);
9898
9899         bp->state = BNX2X_STATE_CLOSED;
9900
9901         return 0;
9902 }
9903
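/* After an EEH reset, re-read the shared memory (shmem) base from the chip
 * and recover the firmware sequence number.  The 0xA0000-0xC0000 window
 * test below is a sanity check that the address points into the region
 * where MCP shared memory is expected to live; anything else is treated as
 * "MCP not active" and the driver continues with NO_MCP_FLAG set.
 */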
9904 static void bnx2x_eeh_recover(struct bnx2x *bp)
9905 {
9906         u32 val;
9907
9908         mutex_init(&bp->port.phy_mutex);
9909
9910         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9911         bp->link_params.shmem_base = bp->common.shmem_base;
9912         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9913
9914         if (!bp->common.shmem_base ||
9915             (bp->common.shmem_base < 0xA0000) ||
9916             (bp->common.shmem_base >= 0xC0000)) {
9917                 BNX2X_DEV_INFO("MCP not active\n");
9918                 bp->flags |= NO_MCP_FLAG;
9919                 return;
9920         }
9921
9922         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9923         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9924                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9925                 BNX2X_ERR("BAD MCP validity signature\n");
9926
9927         if (!BP_NOMCP(bp)) {
9928                 bp->fw_seq =
9929                     (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9930                     DRV_MSG_SEQ_NUMBER_MASK);
9931                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9932         }
9933 }
9934
9935 /**
9936  * bnx2x_io_error_detected - called when PCI error is detected
9937  * @pdev: Pointer to PCI device
9938  * @state: The current pci connection state
9939  *
9940  * This function is called after a PCI bus error affecting
9941  * this device has been detected.
9942  */
9943 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9944                                                 pci_channel_state_t state)
9945 {
9946         struct net_device *dev = pci_get_drvdata(pdev);
9947         struct bnx2x *bp = netdev_priv(dev);
9948
9949         rtnl_lock();
9950
9951         netif_device_detach(dev);
9952
9953         if (state == pci_channel_io_perm_failure) {
9954                 rtnl_unlock();
9955                 return PCI_ERS_RESULT_DISCONNECT;
9956         }
9957
9958         if (netif_running(dev))
9959                 bnx2x_eeh_nic_unload(bp);
9960
9961         pci_disable_device(pdev);
9962
9963         rtnl_unlock();
9964
9965         /* Request a slot reset */
9966         return PCI_ERS_RESULT_NEED_RESET;
9967 }
9968
9969 /**
9970  * bnx2x_io_slot_reset - called after the PCI bus has been reset
9971  * @pdev: Pointer to PCI device
9972  *
9973  * Restart the card from scratch, as if from a cold-boot.
9974  */
9975 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9976 {
9977         struct net_device *dev = pci_get_drvdata(pdev);
9978         struct bnx2x *bp = netdev_priv(dev);
9979
9980         rtnl_lock();
9981
9982         if (pci_enable_device(pdev)) {
9983                 dev_err(&pdev->dev,
9984                         "Cannot re-enable PCI device after reset\n");
9985                 rtnl_unlock();
9986                 return PCI_ERS_RESULT_DISCONNECT;
9987         }
9988
9989         pci_set_master(pdev);
9990         pci_restore_state(pdev);
9991
9992         if (netif_running(dev))
9993                 bnx2x_set_power_state(bp, PCI_D0);
9994
9995         rtnl_unlock();
9996
9997         return PCI_ERS_RESULT_RECOVERED;
9998 }
9999
10000 /**
10001  * bnx2x_io_resume - called when traffic can start flowing again
10002  * @pdev: Pointer to PCI device
10003  *
10004  * This callback is called when the error recovery driver tells us that
10005  * it's OK to resume normal operation.
10006  */
10007 static void bnx2x_io_resume(struct pci_dev *pdev)
10008 {
10009         struct net_device *dev = pci_get_drvdata(pdev);
10010         struct bnx2x *bp = netdev_priv(dev);
10011
10012         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10013                 printk(KERN_ERR "Handling parity error recovery. "
10014                                 "Try again later\n");
10015                 return;
10016         }
10017
10018         rtnl_lock();
10019
10020         bnx2x_eeh_recover(bp);
10021
10022         if (netif_running(dev))
10023                 bnx2x_nic_load(bp, LOAD_NORMAL);
10024
10025         netif_device_attach(dev);
10026
10027         rtnl_unlock();
10028 }
10029
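/* The PCI error recovery flow ties the three callbacks above together:
 * error_detected() detaches the netdev and asks for a slot reset,
 * slot_reset() re-enables the device after the bus reset, and resume()
 * re-runs bnx2x_eeh_recover()/bnx2x_nic_load() once traffic may flow again.
 */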
10030 static struct pci_error_handlers bnx2x_err_handler = {
10031         .error_detected = bnx2x_io_error_detected,
10032         .slot_reset     = bnx2x_io_slot_reset,
10033         .resume         = bnx2x_io_resume,
10034 };
10035
10036 static struct pci_driver bnx2x_pci_driver = {
10037         .name        = DRV_MODULE_NAME,
10038         .id_table    = bnx2x_pci_tbl,
10039         .probe       = bnx2x_init_one,
10040         .remove      = __devexit_p(bnx2x_remove_one),
10041         .suspend     = bnx2x_suspend,
10042         .resume      = bnx2x_resume,
10043         .err_handler = &bnx2x_err_handler,
10044 };
10045
10046 static int __init bnx2x_init(void)
10047 {
10048         int ret;
10049
10050         pr_info("%s", version);
10051
10052         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10053         if (bnx2x_wq == NULL) {
10054                 pr_err("Cannot create workqueue\n");
10055                 return -ENOMEM;
10056         }
10057
10058         ret = pci_register_driver(&bnx2x_pci_driver);
10059         if (ret) {
10060                 pr_err("Cannot register driver\n");
10061                 destroy_workqueue(bnx2x_wq);
10062         }
10063         return ret;
10064 }
10065
10066 static void __exit bnx2x_cleanup(void)
10067 {
10068         pci_unregister_driver(&bnx2x_pci_driver);
10069
10070         destroy_workqueue(bnx2x_wq);
10071 }
10072
10073 module_init(bnx2x_init);
10074 module_exit(bnx2x_cleanup);
10075
10076 #ifdef BCM_CNIC
10077
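/* CNIC hands the driver 16-byte kernel work queue entries (KWQEs), which
 * are buffered in the cnic_kwq ring and drained into the hardware slow
 * path queue (SPQ) by bnx2x_cnic_sp_post() as per-type credits allow:
 * ETH SPEs consume cq_spq_left, COMMON SPEs consume eq_spq_left, and
 * iSCSI/FCoE SPEs are bounded by cnic_eth_dev.max_kwqe_pending.
 */
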
10078 /* count denotes the number of new completions we have seen */
10079 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
10080 {
10081         struct eth_spe *spe;
10082
10083 #ifdef BNX2X_STOP_ON_ERROR
10084         if (unlikely(bp->panic))
10085                 return;
10086 #endif
10087
10088         spin_lock_bh(&bp->spq_lock);
10089         BUG_ON(bp->cnic_spq_pending < count);
10090         bp->cnic_spq_pending -= count;
10091
10092
10093         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
10094                 u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
10095                                 & SPE_HDR_CONN_TYPE) >>
10096                                 SPE_HDR_CONN_TYPE_SHIFT;
10097
10098                 /* Set validation for iSCSI L2 client before sending SETUP
10099                  * ramrod
10100                  */
10101                 if (type == ETH_CONNECTION_TYPE) {
10102                         u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
10103                                              hdr.conn_and_cmd_data) >>
10104                                 SPE_HDR_CMD_ID_SHIFT) & 0xff;
10105
10106                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
10107                                 bnx2x_set_ctx_validation(&bp->context.
10108                                                 vcxt[BNX2X_ISCSI_ETH_CID].eth,
10109                                         HW_CID(bp, BNX2X_ISCSI_ETH_CID));
10110                 }
10111
10112                 /* There may be no more than 8 L2 and no more than 8 L5 SPEs
10113                  * in flight. We also check that the number of outstanding
10114                  * COMMON ramrods is not more than the EQ and SPQ can
10115                  * accommodate.
10116                  */
10117                 if (type == ETH_CONNECTION_TYPE) {
10118                         if (!atomic_read(&bp->cq_spq_left))
10119                                 break;
10120                         else
10121                                 atomic_dec(&bp->cq_spq_left);
10122                 } else if (type == NONE_CONNECTION_TYPE) {
10123                         if (!atomic_read(&bp->eq_spq_left))
10124                                 break;
10125                         else
10126                                 atomic_dec(&bp->eq_spq_left);
10127                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
10128                            (type == FCOE_CONNECTION_TYPE)) {
10129                         if (bp->cnic_spq_pending >=
10130                             bp->cnic_eth_dev.max_kwqe_pending)
10131                                 break;
10132                         else
10133                                 bp->cnic_spq_pending++;
10134                 } else {
10135                         BNX2X_ERR("Unknown SPE type: %d\n", type);
10136                         bnx2x_panic();
10137                         break;
10138                 }
10139
10140                 spe = bnx2x_sp_get_next(bp);
10141                 *spe = *bp->cnic_kwq_cons;
10142
10143                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
10144                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
10145
10146                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
10147                         bp->cnic_kwq_cons = bp->cnic_kwq;
10148                 else
10149                         bp->cnic_kwq_cons++;
10150         }
10151         bnx2x_sp_prod_update(bp);
10152         spin_unlock_bh(&bp->spq_lock);
10153 }
10154
10155 static int bnx2x_cnic_sp_queue(struct net_device *dev,
10156                                struct kwqe_16 *kwqes[], u32 count)
10157 {
10158         struct bnx2x *bp = netdev_priv(dev);
10159         int i;
10160
10161 #ifdef BNX2X_STOP_ON_ERROR
10162         if (unlikely(bp->panic))
10163                 return -EIO;
10164 #endif
10165
10166         spin_lock_bh(&bp->spq_lock);
10167
10168         for (i = 0; i < count; i++) {
10169                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
10170
10171                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
10172                         break;
10173
10174                 *bp->cnic_kwq_prod = *spe;
10175
10176                 bp->cnic_kwq_pending++;
10177
10178                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
10179                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
10180                    spe->data.update_data_addr.hi,
10181                    spe->data.update_data_addr.lo,
10182                    bp->cnic_kwq_pending);
10183
10184                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
10185                         bp->cnic_kwq_prod = bp->cnic_kwq;
10186                 else
10187                         bp->cnic_kwq_prod++;
10188         }
10189
10190         spin_unlock_bh(&bp->spq_lock);
10191
10192         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
10193                 bnx2x_cnic_sp_post(bp, 0);
10194
10195         return i;
10196 }
10197
10198 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10199 {
10200         struct cnic_ops *c_ops;
10201         int rc = 0;
10202
10203         mutex_lock(&bp->cnic_mutex);
10204         c_ops = rcu_dereference_protected(bp->cnic_ops,
10205                                           lockdep_is_held(&bp->cnic_mutex));
10206         if (c_ops)
10207                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10208         mutex_unlock(&bp->cnic_mutex);
10209
10210         return rc;
10211 }
10212
10213 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10214 {
10215         struct cnic_ops *c_ops;
10216         int rc = 0;
10217
10218         rcu_read_lock();
10219         c_ops = rcu_dereference(bp->cnic_ops);
10220         if (c_ops)
10221                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10222         rcu_read_unlock();
10223
10224         return rc;
10225 }
10226
10227 /*
10228  * for commands that have no data
10229  */
10230 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
10231 {
10232         struct cnic_ctl_info ctl = {0};
10233
10234         ctl.cmd = cmd;
10235
10236         return bnx2x_cnic_ctl_send(bp, &ctl);
10237 }
10238
10239 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10240 {
10241         struct cnic_ctl_info ctl;
10242
10243         /* first we tell CNIC and only then we count this as a completion */
10244         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10245         ctl.data.comp.cid = cid;
10246
10247         bnx2x_cnic_ctl_send_bh(bp, &ctl);
10248         bnx2x_cnic_sp_post(bp, 0);
10249 }
10250
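/* bnx2x_drv_ctl() is the control entry point CNIC calls back into: it
 * writes context table entries, returns SPQ credits, and starts or stops
 * the iSCSI L2 ring on behalf of the CNIC module.  The START/STOP cases
 * expect rtnl_lock to be held by the caller, as noted inline below.
 */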
10251 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10252 {
10253         struct bnx2x *bp = netdev_priv(dev);
10254         int rc = 0;
10255
10256         switch (ctl->cmd) {
10257         case DRV_CTL_CTXTBL_WR_CMD: {
10258                 u32 index = ctl->data.io.offset;
10259                 dma_addr_t addr = ctl->data.io.dma_addr;
10260
10261                 bnx2x_ilt_wr(bp, index, addr);
10262                 break;
10263         }
10264
10265         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
10266                 int count = ctl->data.credit.credit_count;
10267
10268                 bnx2x_cnic_sp_post(bp, count);
10269                 break;
10270         }
10271
10272         /* rtnl_lock is held.  */
10273         case DRV_CTL_START_L2_CMD: {
10274                 u32 cli = ctl->data.ring.client_id;
10275
10276                 /* Clear the FCoE FIP and all ENode MAC addresses first */
10277                 bnx2x_del_fcoe_eth_macs(bp);
10278
10279                 /* Set iSCSI MAC address */
10280                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10281
10282                 mmiowb();
10283                 barrier();
10284
10285                 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
10286                  * because it's the only way for the UIO Client to accept
10287                  * them: in non-promiscuous mode only one Client per function
10288                  * (the leading one, in our case) will receive multicast
10289                  * packets.
10290                  */
10291                 bnx2x_rxq_set_mac_filters(bp, cli,
10292                         BNX2X_ACCEPT_UNICAST |
10293                         BNX2X_ACCEPT_BROADCAST |
10294                         BNX2X_ACCEPT_ALL_MULTICAST);
10295                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10296
10297                 break;
10298         }
10299
10300         /* rtnl_lock is held.  */
10301         case DRV_CTL_STOP_L2_CMD: {
10302                 u32 cli = ctl->data.ring.client_id;
10303
10304                 /* Stop accepting on iSCSI L2 ring */
10305                 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
10306                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10307
10308                 mmiowb();
10309                 barrier();
10310
10311                 /* Unset iSCSI L2 MAC */
10312                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
10313                 break;
10314         }
10315         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
10316                 int count = ctl->data.credit.credit_count;
10317
10318                 smp_mb__before_atomic_inc();
10319                 atomic_add(count, &bp->cq_spq_left);
10320                 smp_mb__after_atomic_inc();
10321                 break;
10322         }
10323
10324         default:
10325                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10326                 rc = -EINVAL;
10327         }
10328
10329         return rc;
10330 }
10331
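/* Two interrupt slots are published to CNIC below: irq_arr[0] carries the
 * CNIC status block (and, under MSI-X, the dedicated vector
 * msix_table[1]), while irq_arr[1] points at the default status block.
 */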
10332 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
10333 {
10334         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10335
10336         if (bp->flags & USING_MSIX_FLAG) {
10337                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10338                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10339                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10340         } else {
10341                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10342                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10343         }
10344         if (CHIP_IS_E2(bp))
10345                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10346         else
10347                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10348
10349         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
10350         cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
10351         cp->irq_arr[1].status_blk = bp->def_status_blk;
10352         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
10353         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
10354
10355         cp->num_irq = 2;
10356 }
10357
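/* Registration gives CNIC a page-sized KWQE ring and publishes its ops via
 * rcu_assign_pointer(); bnx2x_unregister_cnic() below clears the pointer
 * and calls synchronize_rcu() before freeing the ring, so lockless readers
 * in bnx2x_cnic_ctl_send_bh() never see a stale cnic_ops.
 */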
10358 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10359                                void *data)
10360 {
10361         struct bnx2x *bp = netdev_priv(dev);
10362         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10363
10364         if (ops == NULL)
10365                 return -EINVAL;
10366
10367         if (atomic_read(&bp->intr_sem) != 0)
10368                 return -EBUSY;
10369
10370         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10371         if (!bp->cnic_kwq)
10372                 return -ENOMEM;
10373
10374         bp->cnic_kwq_cons = bp->cnic_kwq;
10375         bp->cnic_kwq_prod = bp->cnic_kwq;
10376         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10377
10378         bp->cnic_spq_pending = 0;
10379         bp->cnic_kwq_pending = 0;
10380
10381         bp->cnic_data = data;
10382
10383         cp->num_irq = 0;
10384         cp->drv_state = CNIC_DRV_STATE_REGD;
10385         cp->iro_arr = bp->iro_arr;
10386
10387         bnx2x_setup_cnic_irq_info(bp);
10388
10389         rcu_assign_pointer(bp->cnic_ops, ops);
10390
10391         return 0;
10392 }
10393
10394 static int bnx2x_unregister_cnic(struct net_device *dev)
10395 {
10396         struct bnx2x *bp = netdev_priv(dev);
10397         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10398
10399         mutex_lock(&bp->cnic_mutex);
10400         cp->drv_state = 0;
10401         rcu_assign_pointer(bp->cnic_ops, NULL);
10402         mutex_unlock(&bp->cnic_mutex);
10403         synchronize_rcu();
10404         kfree(bp->cnic_kwq);
10405         bp->cnic_kwq = NULL;
10406
10407         return 0;
10408 }
10409
10410 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10411 {
10412         struct bnx2x *bp = netdev_priv(dev);
10413         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10414
10415         /* If both iSCSI and FCoE are disabled, return NULL in
10416          * order to indicate to CNIC that it should not try to work
10417          * with this device.
10418          */
10419         if (NO_ISCSI(bp) && NO_FCOE(bp))
10420                 return NULL;
10421
10422         cp->drv_owner = THIS_MODULE;
10423         cp->chip_id = CHIP_ID(bp);
10424         cp->pdev = bp->pdev;
10425         cp->io_base = bp->regview;
10426         cp->io_base2 = bp->doorbells;
10427         cp->max_kwqe_pending = 8;
10428         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
10429         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10430                              bnx2x_cid_ilt_lines(bp);
10431         cp->ctx_tbl_len = CNIC_ILT_LINES;
10432         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
10433         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10434         cp->drv_ctl = bnx2x_drv_ctl;
10435         cp->drv_register_cnic = bnx2x_register_cnic;
10436         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
10437         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10438         cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10439                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10440         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10441
10442         if (NO_ISCSI_OOO(bp))
10443                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10444
10445         if (NO_ISCSI(bp))
10446                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10447
10448         if (NO_FCOE(bp))
10449                 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10450
10451         DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10452                          "starting cid %d\n",
10453            cp->ctx_blk_size,
10454            cp->ctx_tbl_offset,
10455            cp->ctx_tbl_len,
10456            cp->starting_cid);
10457         return cp;
10458 }
10459 EXPORT_SYMBOL(bnx2x_cnic_probe);
10460
10461 #endif /* BCM_CNIC */
10462