/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
                   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

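/*
 * Example (editorial note, not from the original source): the parameters
 * above are read once at module load time, e.g.
 *
 *      modprobe bnx2x multi_mode=1 num_queues=4 int_mode=0
 *
 * Leaving num_queues at 0 keeps the default of one queue per online CPU;
 * int_mode=1 forces INT#x and int_mode=2 forces MSI, per the
 * MODULE_PARM_DESC strings above.
 */
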
static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
        BCM57712 = 3,
        BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" },
        { "Broadcom NetXtreme II BCM57712 XGb" },
        { "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E        0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp,  addr, U64_LO(mapping));
        REG_WR(bp,  addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
                                       u32 addr, size_t size, u32 val)
{
        int i;
        for (i = 0; i < size/4; i++)
                REG_WR(bp,  addr + (i * 4), val);
}
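
/*
 * Illustrative expansion (editorial note; the 0x100 offset is made up):
 * filling a 16-byte firmware structure with zeros,
 *
 *      __storm_memset_fill(bp, BAR_USTRORM_INTMEM + 0x100, 16, 0);
 *
 * issues four 32-bit writes, to addr + 0, addr + 4, addr + 8 and
 * addr + 12. The storm_memset_*_zero() helpers below are thin wrappers
 * that derive addr and size from the per-client statistics structures.
 */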

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct ustorm_per_client_stats);

        u32 addr = BAR_USTRORM_INTMEM +
                        USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct tstorm_per_client_stats);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct xstorm_per_client_stats);

        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
                                         dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
        REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
                                struct tstorm_eth_function_common_config *tcfg,
                                u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_function_common_config);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_XSTRORM_INTMEM +
                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_TSTRORM_INTMEM +
                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_USTRORM_INTMEM +
                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                        u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
                                struct event_ring_data *eq_data,
                                u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                        u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
        REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 ticks)
{

        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, timeout);
        REG_WR8(bp, addr, ticks);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
                          port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 disable)
{
        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, flags);
        u16 flags = REG_RD16(bp, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= enable_flag;
        REG_WR16(bp, addr, flags);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
                          port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
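
/*
 * Editorial sketch of the indirect-access protocol used above: the write
 * to PCICFG_GRC_ADDRESS aims a config-space window at a GRC register,
 * PCICFG_GRC_DATA is the window itself, and the final write parks the
 * window back on the harmless vendor-id offset. A hypothetical verified
 * write (not in the original source) would look like:
 *
 *      bnx2x_reg_wr_ind(bp, addr, val);
 *      if (bnx2x_reg_rd_ind(bp, addr) != val)
 *              BNX2X_ERR("indirect write to 0x%x failed\n", addr);
 */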

#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                          int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

}

const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                           DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                             bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}
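
/*
 * Editorial example (mirrors bnx2x_prep_dmae_with_comp() below): building
 * an opcode for a PCI -> GRC copy that reports completion with a PCI
 * write-back would be
 *
 *      u32 op = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *                                 true, DMAE_COMP_PCI);
 *
 * The helper folds in port/VN routing, the reset and endianity-swap bits,
 * and (via bnx2x_dmae_opcode_add_comp) the completion-enable bit.
 */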

static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                                      struct dmae_command *dmae,
                                      u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
                                      struct dmae_command *dmae)
{
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
        int rc = 0;

        DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        /* lock the dmae channel */
        spin_lock_bh(&bp->dmae_lock);

        /* reset completion */
        *wb_comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
        while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*wb_comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

        DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
        spin_unlock_bh(&bp->dmae_lock);
        return rc;
}
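
/*
 * Editorial note on the wait budget above, derived from the constants in
 * this function: on real silicon cnt starts at 40 and each poll sleeps
 * 50us, so after the initial 5us the DMAE timeout fires in roughly 2ms;
 * slow emulation (CHIP_REV_IS_SLOW) stretches this to 400000 polls.
 */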

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}
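
/*
 * Usage sketch (editorial; the 0x200 offset is made up): copying four
 * dwords of slowpath data into storm memory,
 *
 *      bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *                       BAR_XSTRORM_INTMEM + 0x200, 4);
 *
 * dst_addr is a GRC byte offset (converted to a dword address above),
 * len32 counts dwords, and the call blocks until the completion value is
 * written back or the DMAE timeout is reported.
 */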

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                                      u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
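
/*
 * Worked example (editorial; assumes DMAE_LEN32_WR_MAX(bp) == 0x400):
 * a len of 0x500 dwords is split into one 0x400-dword DMAE at offset 0
 * and a final 0x100-dword DMAE at byte offset 0x400 * 4 = 0x1000. Note
 * that offset advances in bytes while len counts dwords.
 */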

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;
        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - cannot dump\n");
                return;
        }

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
        addr = trace_shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
                        + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
#endif

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_idx, bp->def_att_idx,
                  bp->attn_state, bp->spq_prod_idx);
        BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                  bp->def_status_blk->atten_status_block.attn_bits,
                  bp->def_status_blk->atten_status_block.attn_bits_ack,
                  bp->def_status_blk->atten_status_block.status_block_id,
                  bp->def_status_blk->atten_status_block.attn_bits_index);
        BNX2X_ERR("     def (");
        for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                pr_cont("0x%x%s",
                       bp->def_status_blk->sp_sb.index_values[i],
                       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

        for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
                *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
                        i*sizeof(u32));

        pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
                         "pf_id(0x%x)  vnic_id(0x%x)  "
                         "vf_id(0x%x)  vf_valid (0x%x)\n",
               sp_sb_data.igu_sb_id,
               sp_sb_data.igu_seg_id,
               sp_sb_data.p_func.pf_id,
               sp_sb_data.p_func.vnic_id,
               sp_sb_data.p_func.vf_id,
               sp_sb_data.p_func.vf_valid);


        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm  *hc_sm_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.common.state_machine :
                        sb_data_e1x.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.index_data :
                        sb_data_e1x.index_data;
                int data_size;
                u32 *sb_data_p;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

                loop = CHIP_IS_E2(bp) ?
                        HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

                /* host sb data */

#ifdef BCM_CNIC
                if (IS_FCOE_FP(fp))
                        continue;
#endif
                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");
                /* fw sb data */
                data_size = CHIP_IS_E2(bp) ?
                        sizeof(struct hc_status_block_data_e2) :
                        sizeof(struct hc_status_block_data_e1x);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E2(bp) ?
                        (u32 *)&sb_data_e2 :
                        (u32 *)&sb_data_e1x;
                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (CHIP_IS_E2(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) "
                               "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
                               "time_to_expire (0x%x) "
                               "timer_value(0x%x)\n", j,
                               hc_sm_p[j].__flags,
                               hc_sm_p[j].igu_sb_id,
                               hc_sm_p[j].igu_seg_id,
                               hc_sm_p[j].time_to_expire,
                               hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) "
                                         "timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }
#endif
        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                if (!CHIP_IS_E1(bp)) {
                        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                           val, port, addr);

                        REG_WR(bp, addr, val);

                        val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
                }
        }

        if (CHIP_IS_E1(bp))
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (!CHIP_IS_E1(bp)) {
                /* init leading/trailing edge */
                if (IS_MF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
        u32 val;
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        if (msix) {
                val &= ~(IGU_PF_CONF_INT_LINE_EN |
                         IGU_PF_CONF_SINGLE_ISR_EN);
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        } else {
                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_INT_LINE_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        }

        DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

        barrier();

        /* init leading/trailing edge */
        if (IS_MF(bp)) {
                val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                if (bp->port.pmf)
                        /* enable nig and gpio3 attention */
                        val |= 0x1100;
        } else
                val = 0xffff;

        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_enable(bp);
        else
                bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        /*
         * In E1 we must use only PCI configuration space to disable the
         * MSI/MSI-X capability; it is forbidden to disable
         * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use
                 * the mask register to prevent the HC from sending
                 * interrupts after we exit this function
                 */
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from HC!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_eth_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}
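
/*
 * Illustrative caller pattern (editorial; HW_LOCK_RESOURCE_SPIO is just
 * an example resource id):
 *
 *      if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_SPIO)) {
 *              (touch the shared resource)
 *              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *      }
 *
 * Unlike bnx2x_acquire_hw_lock() below, this variant makes a single
 * attempt and returns false instead of polling for the lock.
 */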
1359
1360 #ifdef BCM_CNIC
1361 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1362 #endif
1363
1364 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1365                            union eth_rx_cqe *rr_cqe)
1366 {
1367         struct bnx2x *bp = fp->bp;
1368         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1369         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1370
1371         DP(BNX2X_MSG_SP,
1372            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1373            fp->index, cid, command, bp->state,
1374            rr_cqe->ramrod_cqe.ramrod_type);
1375
1376         switch (command | fp->state) {
1377         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1378                 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1379                 fp->state = BNX2X_FP_STATE_OPEN;
1380                 break;
1381
1382         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1383                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1384                 fp->state = BNX2X_FP_STATE_HALTED;
1385                 break;
1386
1387         case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1388                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1389                 fp->state = BNX2X_FP_STATE_TERMINATED;
1390                 break;
1391
1392         default:
1393                 BNX2X_ERR("unexpected MC reply (%d)  "
1394                           "fp[%d] state is %x\n",
1395                           command, fp->index, fp->state);
1396                 break;
1397         }
1398
1399         smp_mb__before_atomic_inc();
1400         atomic_inc(&bp->cq_spq_left);
1401         /* push the change in fp->state towards the memory */
1402         smp_wmb();
1405 }
1406
1407 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1408 {
1409         struct bnx2x *bp = netdev_priv(dev_instance);
1410         u16 status = bnx2x_ack_int(bp);
1411         u16 mask;
1412         int i;
1413
1414         /* Return here if interrupt is shared and it's not for us */
1415         if (unlikely(status == 0)) {
1416                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1417                 return IRQ_NONE;
1418         }
1419         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1420
1421         /* Return here if interrupt is disabled */
1422         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1423                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1424                 return IRQ_HANDLED;
1425         }
1426
1427 #ifdef BNX2X_STOP_ON_ERROR
1428         if (unlikely(bp->panic))
1429                 return IRQ_HANDLED;
1430 #endif
1431
1432         for_each_eth_queue(bp, i) {
1433                 struct bnx2x_fastpath *fp = &bp->fp[i];
1434
1435                 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1436                 if (status & mask) {
1437                         /* Handle Rx and Tx according to SB id */
1438                         prefetch(fp->rx_cons_sb);
1439                         prefetch(fp->tx_cons_sb);
1440                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1441                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1442                         status &= ~mask;
1443                 }
1444         }
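        /*
         * Status bit recap (derived from the masks used here): each eth
         * queue i owns bit (1 + i + CNIC_CONTEXT_USE) via the 0x2 << shift
         * above, while bit 0 belongs to the slow path handled below.
         */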
1445
1446 #ifdef BCM_CNIC
1447         mask = 0x2;
1448         if (status & (mask | 0x1)) {
1449                 struct cnic_ops *c_ops = NULL;
1450
1451                 rcu_read_lock();
1452                 c_ops = rcu_dereference(bp->cnic_ops);
1453                 if (c_ops)
1454                         c_ops->cnic_handler(bp->cnic_data, NULL);
1455                 rcu_read_unlock();
1456
1457                 status &= ~mask;
1458         }
1459 #endif
1460
1461         if (unlikely(status & 0x1)) {
1462                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1463
1464                 status &= ~0x1;
1465                 if (!status)
1466                         return IRQ_HANDLED;
1467         }
1468
1469         if (unlikely(status))
1470                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1471                    status);
1472
1473         return IRQ_HANDLED;
1474 }
1475
1476 /* end of fast path */
1477
1478
1479 /* Link */
1480
1481 /*
1482  * General service functions
1483  */
1484
1485 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1486 {
1487         u32 lock_status;
1488         u32 resource_bit = (1 << resource);
1489         int func = BP_FUNC(bp);
1490         u32 hw_lock_control_reg;
1491         int cnt;
1492
1493         /* Validating that the resource is within range */
1494         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1495                 DP(NETIF_MSG_HW,
1496                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1497                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1498                 return -EINVAL;
1499         }
1500
1501         if (func <= 5) {
1502                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1503         } else {
1504                 hw_lock_control_reg =
1505                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1506         }
1507
1508         /* Validating that the resource is not already taken */
1509         lock_status = REG_RD(bp, hw_lock_control_reg);
1510         if (lock_status & resource_bit) {
1511                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1512                    lock_status, resource_bit);
1513                 return -EEXIST;
1514         }
1515
1516         /* Try for 5 seconds, polling every 5ms */
1517         for (cnt = 0; cnt < 1000; cnt++) {
1518                 /* Try to acquire the lock */
1519                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1520                 lock_status = REG_RD(bp, hw_lock_control_reg);
1521                 if (lock_status & resource_bit)
1522                         return 0;
1523
1524                 msleep(5);
1525         }
1526         DP(NETIF_MSG_HW, "Timeout\n");
1527         return -EAGAIN;
1528 }
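/*
 * Usage sketch (illustrative only; see e.g. bnx2x_set_gpio() below for a
 * real caller): the acquire can fail with -EINVAL, -EEXIST or -EAGAIN, so
 * a careful caller checks the return code before touching the resource:
 *
 *	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	if (rc)
 *		return rc;
 *	... program the shared resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 */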
1529
1530 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1531 {
1532         u32 lock_status;
1533         u32 resource_bit = (1 << resource);
1534         int func = BP_FUNC(bp);
1535         u32 hw_lock_control_reg;
1536
1537         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1538
1539         /* Validating that the resource is within range */
1540         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1541                 DP(NETIF_MSG_HW,
1542                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1543                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1544                 return -EINVAL;
1545         }
1546
1547         if (func <= 5) {
1548                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1549         } else {
1550                 hw_lock_control_reg =
1551                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1552         }
1553
1554         /* Validating that the resource is currently taken */
1555         lock_status = REG_RD(bp, hw_lock_control_reg);
1556         if (!(lock_status & resource_bit)) {
1557                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1558                    lock_status, resource_bit);
1559                 return -EFAULT;
1560         }
1561
1562         REG_WR(bp, hw_lock_control_reg, resource_bit);
1563         return 0;
1564 }
1565
1566
1567 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1568 {
1569         /* The GPIO should be swapped if swap register is set and active */
1570         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1571                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1572         int gpio_shift = gpio_num +
1573                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1574         u32 gpio_mask = (1 << gpio_shift);
1575         u32 gpio_reg;
1576         int value;
1577
1578         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1579                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1580                 return -EINVAL;
1581         }
1582
1583         /* read GPIO value */
1584         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1585
1586         /* get the requested pin value */
1587         if ((gpio_reg & gpio_mask) == gpio_mask)
1588                 value = 1;
1589         else
1590                 value = 0;
1591
1592         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1593
1594         return value;
1595 }
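/*
 * Worked example for the swap logic shared by the GPIO helpers here
 * (illustrative register values assumed): when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE read non-zero, (swap && override) evaluates to 1,
 * so port 1 maps to gpio_port 0 and GPIO 2 is accessed at shift 2; with
 * the swap inactive, port 1 keeps gpio_port 1 and the same GPIO sits at
 * shift 2 + MISC_REGISTERS_GPIO_PORT_SHIFT.
 */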
1596
1597 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1598 {
1599         /* The GPIO should be swapped if swap register is set and active */
1600         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1601                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1602         int gpio_shift = gpio_num +
1603                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1604         u32 gpio_mask = (1 << gpio_shift);
1605         u32 gpio_reg;
1606
1607         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1608                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1609                 return -EINVAL;
1610         }
1611
1612         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1613         /* read GPIO and mask off everything except the FLOAT bits */
1614         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1615
1616         switch (mode) {
1617         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1618                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1619                    gpio_num, gpio_shift);
1620                 /* clear FLOAT and set CLR */
1621                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1622                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1623                 break;
1624
1625         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1626                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1627                    gpio_num, gpio_shift);
1628                 /* clear FLOAT and set SET */
1629                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1630                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1631                 break;
1632
1633         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1634                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1635                    gpio_num, gpio_shift);
1636                 /* set FLOAT */
1637                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1638                 break;
1639
1640         default:
1641                 break;
1642         }
1643
1644         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1645         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1646
1647         return 0;
1648 }
1649
1650 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1651 {
1652         /* The GPIO should be swapped if swap register is set and active */
1653         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1654                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1655         int gpio_shift = gpio_num +
1656                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1657         u32 gpio_mask = (1 << gpio_shift);
1658         u32 gpio_reg;
1659
1660         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1661                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1662                 return -EINVAL;
1663         }
1664
1665         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1666         /* read GPIO int */
1667         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1668
1669         switch (mode) {
1670         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1671                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1672                                    "output low\n", gpio_num, gpio_shift);
1673                 /* clear SET and set CLR */
1674                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1676                 break;
1677
1678         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1679                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1680                                    "output high\n", gpio_num, gpio_shift);
1681                 /* clear CLR and set SET */
1682                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1683                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1684                 break;
1685
1686         default:
1687                 break;
1688         }
1689
1690         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1691         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1692
1693         return 0;
1694 }
1695
1696 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1697 {
1698         u32 spio_mask = (1 << spio_num);
1699         u32 spio_reg;
1700
1701         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1702             (spio_num > MISC_REGISTERS_SPIO_7)) {
1703                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1704                 return -EINVAL;
1705         }
1706
1707         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1708         /* read SPIO and mask off everything except the FLOAT bits */
1709         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1710
1711         switch (mode) {
1712         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1713                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1714                 /* clear FLOAT and set CLR */
1715                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1716                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1717                 break;
1718
1719         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1720                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1721                 /* clear FLOAT and set SET */
1722                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1723                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1724                 break;
1725
1726         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1727                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1728                 /* set FLOAT */
1729                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1730                 break;
1731
1732         default:
1733                 break;
1734         }
1735
1736         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1737         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1738
1739         return 0;
1740 }
1741
1742 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1743 {
1744         u32 sel_phy_idx = 0;
1745         if (bp->link_vars.link_up) {
1746                 sel_phy_idx = EXT_PHY1;
1747                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1748                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1749                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1750                         sel_phy_idx = EXT_PHY2;
1751         } else {
1752
1753                 switch (bnx2x_phy_selection(&bp->link_params)) {
1754                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1755                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1756                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1757                        sel_phy_idx = EXT_PHY1;
1758                        break;
1759                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1760                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1761                        sel_phy_idx = EXT_PHY2;
1762                        break;
1763                 }
1764         }
1765         /*
1766          * The selected active PHY is always after swapping (in case PHY
1767          * swapping is enabled). So when swapping is enabled, we need to
1768          * reverse the configuration.
1769          */
1770
1771         if (bp->link_params.multi_phy_config &
1772             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1773                 if (sel_phy_idx == EXT_PHY1)
1774                         sel_phy_idx = EXT_PHY2;
1775                 else if (sel_phy_idx == EXT_PHY2)
1776                         sel_phy_idx = EXT_PHY1;
1777         }
1778         return LINK_CONFIG_IDX(sel_phy_idx);
1779 }
1780
1781 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1782 {
1783         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1784         switch (bp->link_vars.ieee_fc &
1785                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1786         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1787                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1788                                                    ADVERTISED_Pause);
1789                 break;
1790
1791         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1792                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1793                                                   ADVERTISED_Pause);
1794                 break;
1795
1796         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1797                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1798                 break;
1799
1800         default:
1801                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1802                                                    ADVERTISED_Pause);
1803                 break;
1804         }
1805 }
1806
1807 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1808 {
1809         if (!BP_NOMCP(bp)) {
1810                 u8 rc;
1811                 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1812                 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1813                 /* Initialize link parameters structure variables */
1814                 /* It is recommended to turn off RX FC for jumbo frames
1815                    for better performance */
1816                 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1817                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1818                 else
1819                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1820
1821                 bnx2x_acquire_phy_lock(bp);
1822
1823                 if (load_mode == LOAD_DIAG) {
1824                         bp->link_params.loopback_mode = LOOPBACK_XGXS;
1825                         bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1826                 }
1827
1828                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1829
1830                 bnx2x_release_phy_lock(bp);
1831
1832                 bnx2x_calc_fc_adv(bp);
1833
1834                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1835                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1836                         bnx2x_link_report(bp);
1837                 }
1838                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1839                 return rc;
1840         }
1841         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1842         return -EINVAL;
1843 }
1844
1845 void bnx2x_link_set(struct bnx2x *bp)
1846 {
1847         if (!BP_NOMCP(bp)) {
1848                 bnx2x_acquire_phy_lock(bp);
1849                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1850                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1851                 bnx2x_release_phy_lock(bp);
1852
1853                 bnx2x_calc_fc_adv(bp);
1854         } else
1855                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1856 }
1857
1858 static void bnx2x__link_reset(struct bnx2x *bp)
1859 {
1860         if (!BP_NOMCP(bp)) {
1861                 bnx2x_acquire_phy_lock(bp);
1862                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1863                 bnx2x_release_phy_lock(bp);
1864         } else
1865                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1866 }
1867
1868 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1869 {
1870         u8 rc = 0;
1871
1872         if (!BP_NOMCP(bp)) {
1873                 bnx2x_acquire_phy_lock(bp);
1874                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1875                                      is_serdes);
1876                 bnx2x_release_phy_lock(bp);
1877         } else
1878                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1879
1880         return rc;
1881 }
1882
1883 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1884 {
1885         u32 r_param = bp->link_vars.line_speed / 8;
1886         u32 fair_periodic_timeout_usec;
1887         u32 t_fair;
1888
1889         memset(&(bp->cmng.rs_vars), 0,
1890                sizeof(struct rate_shaping_vars_per_port));
1891         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1892
1893         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1894         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1895
1896         /* this is the threshold below which no timer arming will occur;
1897            the 1.25 coefficient makes the threshold a little bigger than
1898            the real time, to compensate for timer inaccuracy */
1899         bp->cmng.rs_vars.rs_threshold =
1900                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
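        /*
         * Worked example (assuming RS_PERIODIC_TIMEOUT_USEC == 100, per the
         * comment above): at 10G line speed r_param = 10000/8 = 1250
         * bytes/usec, so rs_threshold = 100 * 1250 * 5/4 = 156250 bytes.
         */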
1901
1902         /* resolution of fairness timer */
1903         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1904         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
1905         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1906
1907         /* this is the threshold below which we won't arm the timer anymore */
1908         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1909
1910         /* we multiply by 1e3/8 to get bytes/msec.
1911            We don't want the credit to exceed
1912            t_fair*FAIR_MEM (the algorithm resolution) */
1913         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1914         /* since each tick is 4 usec */
1915         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1916 }
1917
1918 /* Calculates the sum of vn_min_rates.
1919    It's needed for further normalizing of the min_rates.
1920    Stores in bp->vn_weight_sum:
1921      the sum of vn_min_rates,
1922        or
1923      0 - if all the min_rates are 0.
1924      In the latter case the fairness algorithm should be deactivated.
1925      If not all min_rates are zero then those that are zero will be set to 1.
1926  */
1927 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1928 {
1929         int all_zero = 1;
1930         int vn;
1931
1932         bp->vn_weight_sum = 0;
1933         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1934                 u32 vn_cfg = bp->mf_config[vn];
1935                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1936                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1937
1938                 /* Skip hidden vns */
1939                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1940                         continue;
1941
1942                 /* If min rate is zero - set it to 1 */
1943                 if (!vn_min_rate)
1944                         vn_min_rate = DEF_MIN_RATE;
1945                 else
1946                         all_zero = 0;
1947
1948                 bp->vn_weight_sum += vn_min_rate;
1949         }
1950
1951         /* ... only if all min rates are zeros - disable fairness */
1952         if (all_zero) {
1953                 bp->cmng.flags.cmng_enables &=
1954                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1955                 DP(NETIF_MSG_IFUP, "All MIN values are zero - "
1956                    "fairness will be disabled\n");
1957         } else
1958                 bp->cmng.flags.cmng_enables |=
1959                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1960 }
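/*
 * Worked example (illustrative configuration): min-BW settings of
 * {25, 0, 50, 20} percent give vn_min_rate values
 * {2500, DEF_MIN_RATE, 5000, 2000}, so vn_weight_sum ends up as
 * 9500 + DEF_MIN_RATE and fairness stays enabled, since not all of the
 * configured minimums were zero.
 */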
1961
1962 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1963 {
1964         struct rate_shaping_vars_per_vn m_rs_vn;
1965         struct fairness_vars_per_vn m_fair_vn;
1966         u32 vn_cfg = bp->mf_config[vn];
1967         int func = 2*vn + BP_PORT(bp);
1968         u16 vn_min_rate, vn_max_rate;
1969         int i;
1970
1971         /* If function is hidden - set min and max to zeroes */
1972         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1973                 vn_min_rate = 0;
1974                 vn_max_rate = 0;
1975
1976         } else {
1977                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
1979                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1980                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1981                 /* If fairness is enabled (not all min rates are zeroes) and
1982                    if current min rate is zero - set it to 1.
1983                    This is a requirement of the algorithm. */
1984                 if (bp->vn_weight_sum && (vn_min_rate == 0))
1985                         vn_min_rate = DEF_MIN_RATE;
1986
1987                 if (IS_MF_SI(bp))
1988                         /* maxCfg is in percent of the link speed */
1989                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990                 else
1991                         /* maxCfg is absolute in 100Mb units */
1992                         vn_max_rate = maxCfg * 100;
1993         }
1994
1995         DP(NETIF_MSG_IFUP,
1996            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1997            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1998
1999         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2000         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2001
2002         /* global vn counter - maximal Mbps for this vn */
2003         m_rs_vn.vn_counter.rate = vn_max_rate;
2004
2005         /* quota - number of bytes transmitted in this period */
2006         m_rs_vn.vn_counter.quota =
2007                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
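        /*
         * Worked example (assuming the 100 usec period noted in
         * bnx2x_init_port_minmax() above): a 10000 Mbps vn gets a quota of
         * 10000 * 100 / 8 = 125000 bytes per rate-shaping period.
         */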
2008
2009         if (bp->vn_weight_sum) {
2010                 /* credit for each period of the fairness algorithm:
2011                    number of bytes in T_FAIR (the vns share the port rate).
2012                    vn_weight_sum should not be larger than 10000, thus
2013                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2014                    than zero */
2015                 m_fair_vn.vn_credit_delta =
2016                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2017                                                    (8 * bp->vn_weight_sum))),
2018                               (bp->cmng.fair_vars.fair_threshold +
2019                                                         MIN_ABOVE_THRESH));
2020                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2021                    m_fair_vn.vn_credit_delta);
2022         }
2023
2024         /* Store it to internal memory */
2025         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2026                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2027                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2028                        ((u32 *)(&m_rs_vn))[i]);
2029
2030         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2031                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2032                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2033                        ((u32 *)(&m_fair_vn))[i]);
2034 }
2035
2036 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2037 {
2038         if (CHIP_REV_IS_SLOW(bp))
2039                 return CMNG_FNS_NONE;
2040         if (IS_MF(bp))
2041                 return CMNG_FNS_MINMAX;
2042
2043         return CMNG_FNS_NONE;
2044 }
2045
2046 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2047 {
2048         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2049
2050         if (BP_NOMCP(bp))
2051                 return; /* what should be the default value in this case */
2052
2053         /* For 2 port configuration the absolute function number formula
2054          * is:
2055          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2056          *
2057          *      and there are 4 functions per port
2058          *
2059          * For 4 port configuration it is
2060          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2061          *
2062          *      and there are 2 functions per port
2063          */
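        /*
         * Worked example: for vn == 1 on port 0 of path 0, the 2 port case
         * (n == 1 below) gives abs_func = 1 * (2*1 + 0) + 0 = 2, while the
         * 4 port case (n == 2) gives abs_func = 2 * (2*1 + 0) + 0 = 4,
         * matching the two formulas above.
         */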
2064         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2065                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2066
2067                 if (func >= E1H_FUNC_MAX)
2068                         break;
2069
2070                 bp->mf_config[vn] =
2071                         MF_CFG_RD(bp, func_mf_config[func].config);
2072         }
2073 }
2074
2075 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2076 {
2077
2078         if (cmng_type == CMNG_FNS_MINMAX) {
2079                 int vn;
2080
2081                 /* clear cmng_enables */
2082                 bp->cmng.flags.cmng_enables = 0;
2083
2084                 /* read mf conf from shmem */
2085                 if (read_cfg)
2086                         bnx2x_read_mf_cfg(bp);
2087
2088                 /* Init rate shaping and fairness contexts */
2089                 bnx2x_init_port_minmax(bp);
2090
2091                 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2092                 bnx2x_calc_vn_weight_sum(bp);
2093
2094                 /* calculate and set min-max rate for each vn */
2095                 if (bp->port.pmf)
2096                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2097                                 bnx2x_init_vn_minmax(bp, vn);
2098
2099                 /* always enable rate shaping and fairness */
2100                 bp->cmng.flags.cmng_enables |=
2101                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2102                 if (!bp->vn_weight_sum)
2103                         DP(NETIF_MSG_IFUP, "All MIN values are zero - "
2104                                    "fairness will be disabled\n");
2105                 return;
2106         }
2107
2108         /* rate shaping and fairness are disabled */
2109         DP(NETIF_MSG_IFUP,
2110            "rate shaping and fairness are disabled\n");
2111 }
2112
2113 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2114 {
2115         int port = BP_PORT(bp);
2116         int func;
2117         int vn;
2118
2119         /* Set the attention towards other drivers on the same port */
2120         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2121                 if (vn == BP_E1HVN(bp))
2122                         continue;
2123
2124                 func = ((vn << 1) | port);
2125                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2126                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2127         }
2128 }
2129
2130 /* This function is called upon link interrupt */
2131 static void bnx2x_link_attn(struct bnx2x *bp)
2132 {
2133         u32 prev_link_status = bp->link_vars.link_status;
2134         /* Make sure that we are synced with the current statistics */
2135         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2136
2137         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2138
2139         if (bp->link_vars.link_up) {
2140
2141                 /* dropless flow control */
2142                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2143                         int port = BP_PORT(bp);
2144                         u32 pause_enabled = 0;
2145
2146                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2147                                 pause_enabled = 1;
2148
2149                         REG_WR(bp, BAR_USTRORM_INTMEM +
2150                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2151                                pause_enabled);
2152                 }
2153
2154                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2155                         struct host_port_stats *pstats;
2156
2157                         pstats = bnx2x_sp(bp, port_stats);
2158                         /* reset old bmac stats */
2159                         memset(&(pstats->mac_stx[0]), 0,
2160                                sizeof(struct mac_stx));
2161                 }
2162                 if (bp->state == BNX2X_STATE_OPEN)
2163                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2164         }
2165
2166         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2167                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2168
2169                 if (cmng_fns != CMNG_FNS_NONE) {
2170                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2171                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2172                 } else
2173                         /* rate shaping and fairness are disabled */
2174                         DP(NETIF_MSG_IFUP,
2175                            "single function mode without fairness\n");
2176         }
2177
2178         if (IS_MF(bp))
2179                 bnx2x_link_sync_notify(bp);
2180
2181         /* indicate link status only if link status actually changed */
2182         if (prev_link_status != bp->link_vars.link_status)
2183                 bnx2x_link_report(bp);
2184 }
2185
2186 void bnx2x__link_status_update(struct bnx2x *bp)
2187 {
2188         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2189                 return;
2190
2191         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2192
2193         if (bp->link_vars.link_up)
2194                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2195         else
2196                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2197
2198         /* the link status update could be the result of a DCC event,
2199            hence re-read the shmem mf configuration */
2200         bnx2x_read_mf_cfg(bp);
2201
2202         /* indicate link status */
2203         bnx2x_link_report(bp);
2204 }
2205
2206 static void bnx2x_pmf_update(struct bnx2x *bp)
2207 {
2208         int port = BP_PORT(bp);
2209         u32 val;
2210
2211         bp->port.pmf = 1;
2212         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2213
2214         /* enable nig attention */
2215         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2216         if (bp->common.int_block == INT_BLOCK_HC) {
2217                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2218                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2219         } else if (CHIP_IS_E2(bp)) {
2220                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2221                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2222         }
2223
2224         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2225 }
2226
2227 /* end of Link */
2228
2229 /* slow path */
2230
2231 /*
2232  * General service functions
2233  */
2234
2235 /* send the MCP a request, block until there is a reply */
2236 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2237 {
2238         int mb_idx = BP_FW_MB_IDX(bp);
2239         u32 seq = ++bp->fw_seq;
2240         u32 rc = 0;
2241         u32 cnt = 1;
2242         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2243
2244         mutex_lock(&bp->fw_mb_mutex);
2245         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2246         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2247
2248         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2249
2250         do {
2251                 /* let the FW do its magic ... */
2252                 msleep(delay);
2253
2254                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2255
2256                 /* Give the FW up to 5 seconds (500*10ms) */
2257         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2258
2259         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2260            cnt*delay, rc, seq);
2261
2262         /* is this a reply to our command? */
2263         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2264                 rc &= FW_MSG_CODE_MASK;
2265         else {
2266                 /* FW BUG! */
2267                 BNX2X_ERR("FW failed to respond!\n");
2268                 bnx2x_fw_dump(bp);
2269                 rc = 0;
2270         }
2271         mutex_unlock(&bp->fw_mb_mutex);
2272
2273         return rc;
2274 }
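/*
 * Usage sketch (illustrative; see bnx2x_dcc_event() below for real
 * callers): the masked return value is the FW response code, and 0 means
 * the MCP never answered:
 *
 *	u32 resp = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *	if (!resp)
 *		... MCP is unresponsive; bnx2x_fw_dump() has already run ...
 */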
2275
2276 static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2277 {
2278 #ifdef BCM_CNIC
2279         if (IS_FCOE_FP(fp) && IS_MF(bp))
2280                 return false;
2281 #endif
2282         return true;
2283 }
2284
2285 /* must be called under rtnl_lock */
2286 static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2287 {
2288         u32 mask = (1 << cl_id);
2289
2290         /* initial setting is BNX2X_ACCEPT_NONE */
2291         u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2292         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2293         u8 unmatched_unicast = 0;
2294
2295         if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2296                 unmatched_unicast = 1;
2297
2298         if (filters & BNX2X_PROMISCUOUS_MODE) {
2299                 /* promiscuous - accept all, drop none */
2300                 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2301                 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2302                 if (IS_MF_SI(bp)) {
2303                         /*
2304                          * SI mode dictates that in promiscuous mode only
2305                          * unmatched packets are accepted
2306                          */
2307                         unmatched_unicast = 1;
2308                         accp_all_ucast = 0;
2309                 }
2310         }
2311         if (filters & BNX2X_ACCEPT_UNICAST) {
2312                 /* accept matched ucast */
2313                 drop_all_ucast = 0;
2314         }
2315         if (filters & BNX2X_ACCEPT_MULTICAST)
2316                 /* accept matched mcast */
2317                 drop_all_mcast = 0;
2318
2319         if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2320                 /* accept all ucast */
2321                 drop_all_ucast = 0;
2322                 accp_all_ucast = 1;
2323         }
2324         if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2325                 /* accept all mcast */
2326                 drop_all_mcast = 0;
2327                 accp_all_mcast = 1;
2328         }
2329         if (filters & BNX2X_ACCEPT_BROADCAST) {
2330                 /* accept (all) bcast */
2331                 drop_all_bcast = 0;
2332                 accp_all_bcast = 1;
2333         }
2334
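        /*
         * Each filter field below keeps one bit per client: e.g. for
         * cl_id == 3 the mask is 0x08, so promiscuous mode clears bit 3 in
         * every *_drop_all field and sets it in every *_accept_all field.
         */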
2335         bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2336                 bp->mac_filters.ucast_drop_all | mask :
2337                 bp->mac_filters.ucast_drop_all & ~mask;
2338
2339         bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2340                 bp->mac_filters.mcast_drop_all | mask :
2341                 bp->mac_filters.mcast_drop_all & ~mask;
2342
2343         bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2344                 bp->mac_filters.bcast_drop_all | mask :
2345                 bp->mac_filters.bcast_drop_all & ~mask;
2346
2347         bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2348                 bp->mac_filters.ucast_accept_all | mask :
2349                 bp->mac_filters.ucast_accept_all & ~mask;
2350
2351         bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2352                 bp->mac_filters.mcast_accept_all | mask :
2353                 bp->mac_filters.mcast_accept_all & ~mask;
2354
2355         bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2356                 bp->mac_filters.bcast_accept_all | mask :
2357                 bp->mac_filters.bcast_accept_all & ~mask;
2358
2359         bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2360                 bp->mac_filters.unmatched_unicast | mask :
2361                 bp->mac_filters.unmatched_unicast & ~mask;
2362 }
2363
2364 static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2365 {
2366         struct tstorm_eth_function_common_config tcfg = {0};
2367         u16 rss_flgs;
2368
2369         /* tpa */
2370         if (p->func_flgs & FUNC_FLG_TPA)
2371                 tcfg.config_flags |=
2372                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2373
2374         /* set rss flags */
2375         rss_flgs = (p->rss->mode <<
2376                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2377
2378         if (p->rss->cap & RSS_IPV4_CAP)
2379                 rss_flgs |= RSS_IPV4_CAP_MASK;
2380         if (p->rss->cap & RSS_IPV4_TCP_CAP)
2381                 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2382         if (p->rss->cap & RSS_IPV6_CAP)
2383                 rss_flgs |= RSS_IPV6_CAP_MASK;
2384         if (p->rss->cap & RSS_IPV6_TCP_CAP)
2385                 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2386
2387         tcfg.config_flags |= rss_flgs;
2388         tcfg.rss_result_mask = p->rss->result_mask;
2389
2390         storm_memset_func_cfg(bp, &tcfg, p->func_id);
2391
2392         /* Enable the function in the FW */
2393         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2394         storm_memset_func_en(bp, p->func_id, 1);
2395
2396         /* statistics */
2397         if (p->func_flgs & FUNC_FLG_STATS) {
2398                 struct stats_indication_flags stats_flags = {0};
2399                 stats_flags.collect_eth = 1;
2400
2401                 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2402                 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2403
2404                 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2405                 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2406
2407                 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2408                 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2409
2410                 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2411                 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2412         }
2413
2414         /* spq */
2415         if (p->func_flgs & FUNC_FLG_SPQ) {
2416                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2417                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2418                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2419         }
2420 }
2421
2422 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2423                                      struct bnx2x_fastpath *fp)
2424 {
2425         u16 flags = 0;
2426
2427         /* calculate queue flags */
2428         flags |= QUEUE_FLG_CACHE_ALIGN;
2429         flags |= QUEUE_FLG_HC;
2430         flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2431
2432         flags |= QUEUE_FLG_VLAN;
2433         DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2434
2435         if (!fp->disable_tpa)
2436                 flags |= QUEUE_FLG_TPA;
2437
2438         flags = stat_counter_valid(bp, fp) ?
2439                         (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2440
2441         return flags;
2442 }
2443
2444 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2445         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2446         struct bnx2x_rxq_init_params *rxq_init)
2447 {
2448         u16 max_sge = 0;
2449         u16 sge_sz = 0;
2450         u16 tpa_agg_size = 0;
2451
2452         /* calculate queue flags */
2453         u16 flags = bnx2x_get_cl_flags(bp, fp);
2454
2455         if (!fp->disable_tpa) {
2456                 pause->sge_th_hi = 250;
2457                 pause->sge_th_lo = 150;
2458                 tpa_agg_size = min_t(u32,
2459                         (min_t(u32, 8, MAX_SKB_FRAGS) *
2460                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2461                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2462                         SGE_PAGE_SHIFT;
2463                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2464                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2465                 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2466                                     0xffff);
2467         }
2468
2469         /* pause - not for e1 */
2470         if (!CHIP_IS_E1(bp)) {
2471                 pause->bd_th_hi = 350;
2472                 pause->bd_th_lo = 250;
2473                 pause->rcq_th_hi = 350;
2474                 pause->rcq_th_lo = 250;
2475                 pause->sge_th_hi = 0;
2476                 pause->sge_th_lo = 0;
2477                 pause->pri_map = 1;
2478         }
2479
2480         /* rxq setup */
2481         rxq_init->flags = flags;
2482         rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2483         rxq_init->dscr_map = fp->rx_desc_mapping;
2484         rxq_init->sge_map = fp->rx_sge_mapping;
2485         rxq_init->rcq_map = fp->rx_comp_mapping;
2486         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2487
2488         /* Always use mini-jumbo MTU for FCoE L2 ring */
2489         if (IS_FCOE_FP(fp))
2490                 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2491         else
2492                 rxq_init->mtu = bp->dev->mtu;
2493
2494         rxq_init->buf_sz = fp->rx_buf_size;
2495         rxq_init->cl_qzone_id = fp->cl_qzone_id;
2496         rxq_init->cl_id = fp->cl_id;
2497         rxq_init->spcl_id = fp->cl_id;
2498         rxq_init->stat_id = fp->cl_id;
2499         rxq_init->tpa_agg_sz = tpa_agg_size;
2500         rxq_init->sge_buf_sz = sge_sz;
2501         rxq_init->max_sges_pkt = max_sge;
2502         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2503         rxq_init->fw_sb_id = fp->fw_sb_id;
2504
2505         if (IS_FCOE_FP(fp))
2506                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2507         else
2508                 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2509
2510         rxq_init->cid = HW_CID(bp, fp->cid);
2511
2512         rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2513 }
2514
2515 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2516         struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2517 {
2518         u16 flags = bnx2x_get_cl_flags(bp, fp);
2519
2520         txq_init->flags = flags;
2521         txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2522         txq_init->dscr_map = fp->tx_desc_mapping;
2523         txq_init->stat_id = fp->cl_id;
2524         txq_init->cid = HW_CID(bp, fp->cid);
2525         txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2526         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2527         txq_init->fw_sb_id = fp->fw_sb_id;
2528
2529         if (IS_FCOE_FP(fp)) {
2530                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2531                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2532         }
2533
2534         txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2535 }
2536
2537 static void bnx2x_pf_init(struct bnx2x *bp)
2538 {
2539         struct bnx2x_func_init_params func_init = {0};
2540         struct bnx2x_rss_params rss = {0};
2541         struct event_ring_data eq_data = { {0} };
2542         u16 flags;
2543
2544         /* pf specific setups */
2545         if (!CHIP_IS_E1(bp))
2546                 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2547
2548         if (CHIP_IS_E2(bp)) {
2549                 /* reset IGU PF statistics: MSIX + ATTN */
2550                 /* PF */
2551                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2552                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2553                            (CHIP_MODE_IS_4_PORT(bp) ?
2554                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2555                 /* ATTN */
2556                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2557                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2558                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2559                            (CHIP_MODE_IS_4_PORT(bp) ?
2560                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2561         }
2562
2563         /* function setup flags */
2564         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2565
2566         if (CHIP_IS_E1x(bp))
2567                 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2568         else
2569                 flags |= FUNC_FLG_TPA;
2570
2571         /* function setup */
2572
2573         /*
2574          * Although RSS is meaningless when there is a single HW queue, we
2575          * still need it enabled in order to have the HW Rx hash generated.
2576          */
2577         rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2578                    RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2579         rss.mode = bp->multi_mode;
2580         rss.result_mask = MULTI_MASK;
2581         func_init.rss = &rss;
2582
2583         func_init.func_flgs = flags;
2584         func_init.pf_id = BP_FUNC(bp);
2585         func_init.func_id = BP_FUNC(bp);
2586         func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2587         func_init.spq_map = bp->spq_mapping;
2588         func_init.spq_prod = bp->spq_prod_idx;
2589
2590         bnx2x_func_init(bp, &func_init);
2591
2592         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2593
2594         /*
2595          * Congestion management values depend on the link rate.
2596          * There is no active link yet, so the initial link rate is set
2597          * to 10 Gbps. When the link comes up, the congestion management
2598          * values are re-calculated according to the actual link rate.
2599          */
2600         bp->link_vars.line_speed = SPEED_10000;
2601         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2602
2603         /* Only the PMF sets the HW */
2604         if (bp->port.pmf)
2605                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2606
2607         /* no rx until link is up */
2608         bp->rx_mode = BNX2X_RX_MODE_NONE;
2609         bnx2x_set_storm_rx_mode(bp);
2610
2611         /* init Event Queue */
2612         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2613         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2614         eq_data.producer = bp->eq_prod;
2615         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2616         eq_data.sb_id = DEF_SB_ID;
2617         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2618 }
2619
2620
2621 static void bnx2x_e1h_disable(struct bnx2x *bp)
2622 {
2623         int port = BP_PORT(bp);
2624
2625         netif_tx_disable(bp->dev);
2626
2627         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2628
2629         netif_carrier_off(bp->dev);
2630 }
2631
2632 static void bnx2x_e1h_enable(struct bnx2x *bp)
2633 {
2634         int port = BP_PORT(bp);
2635
2636         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2637
2638         /* Tx queues should only be re-enabled */
2639         netif_tx_wake_all_queues(bp->dev);
2640
2641         /*
2642          * Should not call netif_carrier_on since it will be called if the link
2643          * is up when checking for link state
2644          */
2645 }
2646
2647 /* called due to MCP event (on pmf):
2648  *      reread new bandwidth configuration
2649  *      configure FW
2650  *      notify other functions about the change
2651  */
2652 static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2653 {
2654         if (bp->link_vars.link_up) {
2655                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2656                 bnx2x_link_sync_notify(bp);
2657         }
2658         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2659 }
2660
2661 static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2662 {
2663         bnx2x_config_mf_bw(bp);
2664         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2665 }
2666
2667 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2668 {
2669         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2670
2671         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2672
2673                 /*
2674                  * This is the only place besides the function initialization
2675                  * where the bp->flags can change so it is done without any
2676                  * locks
2677                  */
2678                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2679                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2680                         bp->flags |= MF_FUNC_DIS;
2681
2682                         bnx2x_e1h_disable(bp);
2683                 } else {
2684                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2685                         bp->flags &= ~MF_FUNC_DIS;
2686
2687                         bnx2x_e1h_enable(bp);
2688                 }
2689                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2690         }
2691         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2692                 bnx2x_config_mf_bw(bp);
2693                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2694         }
2695
2696         /* Report results to MCP */
2697         if (dcc_event)
2698                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2699         else
2700                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2701 }
2702
2703 /* must be called under the spq lock */
2704 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2705 {
2706         struct eth_spe *next_spe = bp->spq_prod_bd;
2707
2708         if (bp->spq_prod_bd == bp->spq_last_bd) {
2709                 bp->spq_prod_bd = bp->spq;
2710                 bp->spq_prod_idx = 0;
2711                 DP(NETIF_MSG_TIMER, "end of spq\n");
2712         } else {
2713                 bp->spq_prod_bd++;
2714                 bp->spq_prod_idx++;
2715         }
2716         return next_spe;
2717 }
2718
2719 /* must be called under the spq lock */
2720 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2721 {
2722         int func = BP_FUNC(bp);
2723
2724         /* Make sure that BD data is updated before writing the producer */
2725         wmb();
2726
2727         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2728                  bp->spq_prod_idx);
2729         mmiowb();
2730 }
2731
2732 /* the slow path queue is odd since completions arrive on the fastpath ring */
2733 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2734                   u32 data_hi, u32 data_lo, int common)
2735 {
2736         struct eth_spe *spe;
2737         u16 type;
2738
2739 #ifdef BNX2X_STOP_ON_ERROR
2740         if (unlikely(bp->panic))
2741                 return -EIO;
2742 #endif
2743
2744         spin_lock_bh(&bp->spq_lock);
2745
2746         if (common) {
2747                 if (!atomic_read(&bp->eq_spq_left)) {
2748                         BNX2X_ERR("BUG! EQ ring full!\n");
2749                         spin_unlock_bh(&bp->spq_lock);
2750                         bnx2x_panic();
2751                         return -EBUSY;
2752                 }
2753         } else if (!atomic_read(&bp->cq_spq_left)) {
2754                 BNX2X_ERR("BUG! SPQ ring full!\n");
2755                 spin_unlock_bh(&bp->spq_lock);
2756                 bnx2x_panic();
2757                 return -EBUSY;
2758         }
2759
2760         spe = bnx2x_sp_get_next(bp);
2761
2762         /* CID needs the port number to be encoded in it */
2763         spe->hdr.conn_and_cmd_data =
2764                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2765                                     HW_CID(bp, cid));
2766
2767         if (common)
2768                 /* Common ramrods:
2769                  *      FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2770                  *      TRAFFIC_STOP, TRAFFIC_START
2771                  */
2772                 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2773                         & SPE_HDR_CONN_TYPE;
2774         else
2775                 /* ETH ramrods: SETUP, HALT */
2776                 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2777                         & SPE_HDR_CONN_TYPE;
2778
2779         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2780                  SPE_HDR_FUNCTION_ID);
2781
2782         spe->hdr.type = cpu_to_le16(type);
2783
2784         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2785         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2786
2787         /* stats ramrod has its own slot on the spq */
2788         if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2789                 /* It's ok if the actual decrement is issued towards the memory
2790                  * somewhere between the spin_lock and spin_unlock. Thus no
2791                  * more explicit memory barrier is needed.
2792                  */
2793                 if (common)
2794                         atomic_dec(&bp->eq_spq_left);
2795                 else
2796                         atomic_dec(&bp->cq_spq_left);
2797         }
2798
2799
2800         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2801            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
2802            "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
2803            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2804            (u32)(U64_LO(bp->spq_mapping) +
2805            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2806            HW_CID(bp, cid), data_hi, data_lo, type,
2807            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
2808
2809         bnx2x_sp_prod_update(bp);
2810         spin_unlock_bh(&bp->spq_lock);
2811         return 0;
2812 }
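
/* Usage sketch (illustration only): posting a common ramrod such as the
 * stats query looks like
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
 *			   data_hi, data_lo, 1);
 *
 * where data_hi/data_lo optionally carry a buffer address; a non-zero rc
 * means the EQ ring was full (or the driver paniced when built with
 * BNX2X_STOP_ON_ERROR).
 */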
2813
2814 /* acquire split MCP access lock register */
2815 static int bnx2x_acquire_alr(struct bnx2x *bp)
2816 {
2817         u32 j, val;
2818         int rc = 0;
2819
2820         might_sleep();
2821         for (j = 0; j < 1000; j++) {
2822                 val = (1UL << 31);
2823                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2824                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2825                 if (val & (1UL << 31))
2826                         break;
2827
2828                 msleep(5);
2829         }
2830         if (!(val & (1UL << 31))) {
2831                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2832                 rc = -EBUSY;
2833         }
2834
2835         return rc;
2836 }
2837
2838 /* release split MCP access lock register */
2839 static void bnx2x_release_alr(struct bnx2x *bp)
2840 {
2841         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2842 }
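
/* Usage sketch, mirroring bnx2x_attn_int_deasserted() below:
 *
 *	bnx2x_acquire_alr(bp);
 *	... read and handle the shared AEU attention registers ...
 *	bnx2x_release_alr(bp);
 *
 * The lock arbitrates GRC access against the MCP and the other port.
 */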
2843
2844 #define BNX2X_DEF_SB_ATT_IDX    0x0001
2845 #define BNX2X_DEF_SB_IDX        0x0002
2846
2847 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2848 {
2849         struct host_sp_status_block *def_sb = bp->def_status_blk;
2850         u16 rc = 0;
2851
2852         barrier(); /* status block is written to by the chip */
2853         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2854                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2855                 rc |= BNX2X_DEF_SB_ATT_IDX;
2856         }
2857
2858         if (bp->def_idx != def_sb->sp_sb.running_index) {
2859                 bp->def_idx = def_sb->sp_sb.running_index;
2860                 rc |= BNX2X_DEF_SB_IDX;
2861         }
2862
2863         /* Do not reorder: the index reads must complete before handling */
2864         barrier();
2865         return rc;
2866 }
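
/* Usage sketch: the returned mask is consumed by bnx2x_sp_task(), e.g.
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *	if (status & BNX2X_DEF_SB_ATT_IDX)
 *		bnx2x_attn_int(bp);	(HW attentions)
 *	if (status & BNX2X_DEF_SB_IDX)
 *		bnx2x_eq_int(bp);	(EQ completions)
 */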
2867
2868 /*
2869  * slow path service functions
2870  */
2871
2872 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2873 {
2874         int port = BP_PORT(bp);
2875         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2876                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2877         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2878                                        NIG_REG_MASK_INTERRUPT_PORT0;
2879         u32 aeu_mask;
2880         u32 nig_mask = 0;
2881         u32 reg_addr;
2882
2883         if (bp->attn_state & asserted)
2884                 BNX2X_ERR("IGU ERROR\n");
2885
2886         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2887         aeu_mask = REG_RD(bp, aeu_addr);
2888
2889         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2890            aeu_mask, asserted);
2891         aeu_mask &= ~(asserted & 0x3ff);
2892         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2893
2894         REG_WR(bp, aeu_addr, aeu_mask);
2895         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2896
2897         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2898         bp->attn_state |= asserted;
2899         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2900
2901         if (asserted & ATTN_HARD_WIRED_MASK) {
2902                 if (asserted & ATTN_NIG_FOR_FUNC) {
2903
2904                         bnx2x_acquire_phy_lock(bp);
2905
2906                         /* save nig interrupt mask */
2907                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2908                         REG_WR(bp, nig_int_mask_addr, 0);
2909
2910                         bnx2x_link_attn(bp);
2911
2912                         /* handle unicore attn? */
2913                 }
2914                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2915                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2916
2917                 if (asserted & GPIO_2_FUNC)
2918                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2919
2920                 if (asserted & GPIO_3_FUNC)
2921                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2922
2923                 if (asserted & GPIO_4_FUNC)
2924                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2925
2926                 if (port == 0) {
2927                         if (asserted & ATTN_GENERAL_ATTN_1) {
2928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2930                         }
2931                         if (asserted & ATTN_GENERAL_ATTN_2) {
2932                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2933                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2934                         }
2935                         if (asserted & ATTN_GENERAL_ATTN_3) {
2936                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2937                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2938                         }
2939                 } else {
2940                         if (asserted & ATTN_GENERAL_ATTN_4) {
2941                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2942                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2943                         }
2944                         if (asserted & ATTN_GENERAL_ATTN_5) {
2945                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2946                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2947                         }
2948                         if (asserted & ATTN_GENERAL_ATTN_6) {
2949                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2950                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2951                         }
2952                 }
2953
2954         } /* if hardwired */
2955
2956         if (bp->common.int_block == INT_BLOCK_HC)
2957                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2958                             COMMAND_REG_ATTN_BITS_SET);
2959         else
2960                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2961
2962         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2963            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2964         REG_WR(bp, reg_addr, asserted);
2965
2966         /* now set back the mask */
2967         if (asserted & ATTN_NIG_FOR_FUNC) {
2968                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2969                 bnx2x_release_phy_lock(bp);
2970         }
2971 }
2972
2973 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2974 {
2975         int port = BP_PORT(bp);
2976         u32 ext_phy_config;
2977         /* mark the failure */
2978         ext_phy_config =
2979                 SHMEM_RD(bp,
2980                          dev_info.port_hw_config[port].external_phy_config);
2981
2982         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2983         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2984         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2985                  ext_phy_config);
2986
2987         /* log the failure */
2988         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2989                " the driver to shut down the card to prevent permanent"
2990                " damage.  Please contact OEM Support for assistance\n");
2991 }
2992
2993 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2994 {
2995         int port = BP_PORT(bp);
2996         int reg_offset;
2997         u32 val;
2998
2999         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3000                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3001
3002         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3003
3004                 val = REG_RD(bp, reg_offset);
3005                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3006                 REG_WR(bp, reg_offset, val);
3007
3008                 BNX2X_ERR("SPIO5 hw attention\n");
3009
3010                 /* Fan failure attention */
3011                 bnx2x_hw_reset_phy(&bp->link_params);
3012                 bnx2x_fan_failure(bp);
3013         }
3014
3015         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3016                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3017                 bnx2x_acquire_phy_lock(bp);
3018                 bnx2x_handle_module_detect_int(&bp->link_params);
3019                 bnx2x_release_phy_lock(bp);
3020         }
3021
3022         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3023
3024                 val = REG_RD(bp, reg_offset);
3025                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3026                 REG_WR(bp, reg_offset, val);
3027
3028                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3029                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3030                 bnx2x_panic();
3031         }
3032 }
3033
3034 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3035 {
3036         u32 val;
3037
3038         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3039
3040                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3041                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3042                 /* DORQ discard attention */
3043                 if (val & 0x2)
3044                         BNX2X_ERR("FATAL error from DORQ\n");
3045         }
3046
3047         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3048
3049                 int port = BP_PORT(bp);
3050                 int reg_offset;
3051
3052                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3053                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3054
3055                 val = REG_RD(bp, reg_offset);
3056                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3057                 REG_WR(bp, reg_offset, val);
3058
3059                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3060                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3061                 bnx2x_panic();
3062         }
3063 }
3064
3065 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3066 {
3067         u32 val;
3068
3069         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3070
3071                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3072                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3073                 /* CFC error attention */
3074                 if (val & 0x2)
3075                         BNX2X_ERR("FATAL error from CFC\n");
3076         }
3077
3078         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3079
3080                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3081                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3082                 /* RQ_USDMDP_FIFO_OVERFLOW */
3083                 if (val & 0x18000)
3084                         BNX2X_ERR("FATAL error from PXP\n");
3085                 if (CHIP_IS_E2(bp)) {
3086                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3087                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3088                 }
3089         }
3090
3091         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3092
3093                 int port = BP_PORT(bp);
3094                 int reg_offset;
3095
3096                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3097                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3098
3099                 val = REG_RD(bp, reg_offset);
3100                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3101                 REG_WR(bp, reg_offset, val);
3102
3103                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3104                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3105                 bnx2x_panic();
3106         }
3107 }
3108
3109 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3110 {
3111         u32 val;
3112
3113         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3114
3115                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3116                         int func = BP_FUNC(bp);
3117
3118                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3119                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3120                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3121                         val = SHMEM_RD(bp,
3122                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3123                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3124                                 bnx2x_dcc_event(bp,
3125                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3126
3127                         if (val & DRV_STATUS_SET_MF_BW)
3128                                 bnx2x_set_mf_bw(bp);
3129
3130                         bnx2x__link_status_update(bp);
3131                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3132                                 bnx2x_pmf_update(bp);
3133
3134                         if (bp->port.pmf &&
3135                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3136                                 bp->dcbx_enabled > 0)
3137                                 /* start dcbx state machine */
3138                                 bnx2x_dcbx_set_params(bp,
3139                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
3140                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3141
3142                         BNX2X_ERR("MC assert!\n");
3143                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3144                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3145                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3146                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3147                         bnx2x_panic();
3148
3149                 } else if (attn & BNX2X_MCP_ASSERT) {
3150
3151                         BNX2X_ERR("MCP assert!\n");
3152                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3153                         bnx2x_fw_dump(bp);
3154
3155                 } else
3156                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3157         }
3158
3159         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3160                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3161                 if (attn & BNX2X_GRC_TIMEOUT) {
3162                         val = CHIP_IS_E1(bp) ? 0 :
3163                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3164                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3165                 }
3166                 if (attn & BNX2X_GRC_RSV) {
3167                         val = CHIP_IS_E1(bp) ? 0 :
3168                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3169                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3170                 }
3171                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3172         }
3173 }
3174
3175 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3176 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3177 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3178 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3179 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
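
/* Layout sketch: BNX2X_MISC_GEN_REG packs two fields - bits [15:0] hold
 * the load counter and bit 16 is the "reset in progress" flag.  E.g. a
 * raw value of 0x00010002 decodes as a load count of 2 with a reset
 * still pending:
 *
 *	cnt      = raw & LOAD_COUNTER_MASK;             == 2
 *	in_reset = raw & (1 << RESET_DONE_FLAG_SHIFT);  != 0
 */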
3180
3181 /*
3182  * should be run under rtnl lock
3183  */
3184 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3185 {
3186         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3187         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3188         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3189         barrier();
3190         mmiowb();
3191 }
3192
3193 /*
3194  * should be run under rtnl lock
3195  */
3196 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3197 {
3198         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3199         val |= (1 << RESET_DONE_FLAG_SHIFT);
3200         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3201         barrier();
3202         mmiowb();
3203 }
3204
3205 /*
3206  * should be run under rtnl lock
3207  */
3208 bool bnx2x_reset_is_done(struct bnx2x *bp)
3209 {
3210         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3211         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3212         return !(val & RESET_DONE_FLAG_MASK);
3213 }
3214
3215 /*
3216  * should be run under rtnl lock
3217  */
3218 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3219 {
3220         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3221
3222         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3223
3224         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3225         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3226         barrier();
3227         mmiowb();
3228 }
3229
3230 /*
3231  * should be run under rtnl lock
3232  */
3233 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3234 {
3235         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3236
3237         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3238
3239         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3240         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3241         barrier();
3242         mmiowb();
3243
3244         return val1;
3245 }
3246
3247 /*
3248  * should be run under rtnl lock
3249  */
3250 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3251 {
3252         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3253 }
3254
3255 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3256 {
3257         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3258         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3259 }
3260
3261 static inline void _print_next_block(int idx, const char *blk)
3262 {
3263         if (idx)
3264                 pr_cont(", ");
3265         pr_cont("%s", blk);
3266 }
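
/* Example: three consecutive calls with idx 0, 1, 2 and blocks "BRB",
 * "PARSER", "TSDM" continue the current log line as "BRB, PARSER, TSDM".
 */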
3267
3268 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3269 {
3270         int i = 0;
3271         u32 cur_bit = 0;
3272         for (i = 0; sig; i++) {
3273                 cur_bit = ((u32)0x1 << i);
3274                 if (sig & cur_bit) {
3275                         switch (cur_bit) {
3276                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3277                                 _print_next_block(par_num++, "BRB");
3278                                 break;
3279                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3280                                 _print_next_block(par_num++, "PARSER");
3281                                 break;
3282                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3283                                 _print_next_block(par_num++, "TSDM");
3284                                 break;
3285                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3286                                 _print_next_block(par_num++, "SEARCHER");
3287                                 break;
3288                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3289                                 _print_next_block(par_num++, "TSEMI");
3290                                 break;
3291                         }
3292
3293                         /* Clear the bit */
3294                         sig &= ~cur_bit;
3295                 }
3296         }
3297
3298         return par_num;
3299 }
3300
3301 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3302 {
3303         int i = 0;
3304         u32 cur_bit = 0;
3305         for (i = 0; sig; i++) {
3306                 cur_bit = ((u32)0x1 << i);
3307                 if (sig & cur_bit) {
3308                         switch (cur_bit) {
3309                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3310                                 _print_next_block(par_num++, "PBCLIENT");
3311                                 break;
3312                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3313                                 _print_next_block(par_num++, "QM");
3314                                 break;
3315                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3316                                 _print_next_block(par_num++, "XSDM");
3317                                 break;
3318                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3319                                 _print_next_block(par_num++, "XSEMI");
3320                                 break;
3321                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3322                                 _print_next_block(par_num++, "DOORBELLQ");
3323                                 break;
3324                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3325                                 _print_next_block(par_num++, "VAUX PCI CORE");
3326                                 break;
3327                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3328                                 _print_next_block(par_num++, "DEBUG");
3329                                 break;
3330                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3331                                 _print_next_block(par_num++, "USDM");
3332                                 break;
3333                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3334                                 _print_next_block(par_num++, "USEMI");
3335                                 break;
3336                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3337                                 _print_next_block(par_num++, "UPB");
3338                                 break;
3339                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3340                                 _print_next_block(par_num++, "CSDM");
3341                                 break;
3342                         }
3343
3344                         /* Clear the bit */
3345                         sig &= ~cur_bit;
3346                 }
3347         }
3348
3349         return par_num;
3350 }
3351
3352 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3353 {
3354         int i = 0;
3355         u32 cur_bit = 0;
3356         for (i = 0; sig; i++) {
3357                 cur_bit = ((u32)0x1 << i);
3358                 if (sig & cur_bit) {
3359                         switch (cur_bit) {
3360                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3361                                 _print_next_block(par_num++, "CSEMI");
3362                                 break;
3363                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3364                                 _print_next_block(par_num++, "PXP");
3365                                 break;
3366                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3367                                 _print_next_block(par_num++,
3368                                         "PXPPCICLOCKCLIENT");
3369                                 break;
3370                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3371                                 _print_next_block(par_num++, "CFC");
3372                                 break;
3373                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3374                                 _print_next_block(par_num++, "CDU");
3375                                 break;
3376                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3377                                 _print_next_block(par_num++, "IGU");
3378                                 break;
3379                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3380                                 _print_next_block(par_num++, "MISC");
3381                                 break;
3382                         }
3383
3384                         /* Clear the bit */
3385                         sig &= ~cur_bit;
3386                 }
3387         }
3388
3389         return par_num;
3390 }
3391
3392 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3393 {
3394         int i = 0;
3395         u32 cur_bit = 0;
3396         for (i = 0; sig; i++) {
3397                 cur_bit = ((u32)0x1 << i);
3398                 if (sig & cur_bit) {
3399                         switch (cur_bit) {
3400                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3401                                 _print_next_block(par_num++, "MCP ROM");
3402                                 break;
3403                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3404                                 _print_next_block(par_num++, "MCP UMP RX");
3405                                 break;
3406                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3407                                 _print_next_block(par_num++, "MCP UMP TX");
3408                                 break;
3409                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3410                                 _print_next_block(par_num++, "MCP SCPAD");
3411                                 break;
3412                         }
3413
3414                         /* Clear the bit */
3415                         sig &= ~cur_bit;
3416                 }
3417         }
3418
3419         return par_num;
3420 }
3421
3422 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3423                                      u32 sig2, u32 sig3)
3424 {
3425         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3426             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3427                 int par_num = 0;
3428                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3429                         "[0]:0x%08x [1]:0x%08x "
3430                         "[2]:0x%08x [3]:0x%08x\n",
3431                           sig0 & HW_PRTY_ASSERT_SET_0,
3432                           sig1 & HW_PRTY_ASSERT_SET_1,
3433                           sig2 & HW_PRTY_ASSERT_SET_2,
3434                           sig3 & HW_PRTY_ASSERT_SET_3);
3435                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3436                        bp->dev->name);
3437                 par_num = bnx2x_print_blocks_with_parity0(
3438                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3439                 par_num = bnx2x_print_blocks_with_parity1(
3440                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3441                 par_num = bnx2x_print_blocks_with_parity2(
3442                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3443                 par_num = bnx2x_print_blocks_with_parity3(
3444                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3445                 pr_cont("\n");
3446                 return true;
3447         } else
3448                 return false;
3449 }
3450
3451 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3452 {
3453         struct attn_route attn;
3454         int port = BP_PORT(bp);
3455
3456         attn.sig[0] = REG_RD(bp,
3457                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3458                              port*4);
3459         attn.sig[1] = REG_RD(bp,
3460                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3461                              port*4);
3462         attn.sig[2] = REG_RD(bp,
3463                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3464                              port*4);
3465         attn.sig[3] = REG_RD(bp,
3466                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3467                              port*4);
3468
3469         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3470                                         attn.sig[3]);
3471 }
3472
3474 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3475 {
3476         u32 val;
3477         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3478
3479                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3480                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3481                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3482                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3483                                   "ADDRESS_ERROR\n");
3484                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3485                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3486                                   "INCORRECT_RCV_BEHAVIOR\n");
3487                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3488                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3489                                   "WAS_ERROR_ATTN\n");
3490                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3491                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3492                                   "VF_LENGTH_VIOLATION_ATTN\n");
3493                 if (val &
3494                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3495                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3496                                   "VF_GRC_SPACE_VIOLATION_ATTN\n");
3497                 if (val &
3498                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3499                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3500                                   "VF_MSIX_BAR_VIOLATION_ATTN\n");
3501                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3502                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3503                                   "TCPL_ERROR_ATTN\n");
3504                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3505                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3506                                   "TCPL_IN_TWO_RCBS_ATTN\n");
3507                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3508                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3509                                   "CSSNOOP_FIFO_OVERFLOW\n");
3510         }
3511         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3512                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3513                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3514                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3515                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3516                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3517                         BNX2X_ERR("ATC_ATC_INT_STS_REG"
3518                                   "_ATC_TCPL_TO_NOT_PEND\n");
3519                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3520                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3521                                   "ATC_GPA_MULTIPLE_HITS\n");
3522                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3523                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3524                                   "ATC_RCPL_TO_EMPTY_CNT\n");
3525                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3526                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3527                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3528                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3529                                   "ATC_IREQ_LESS_THAN_STU\n");
3530         }
3531
3532         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3533                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3534                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3535                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3536                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3537         }
3538
3539 }
3540
3541 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3542 {
3543         struct attn_route attn, *group_mask;
3544         int port = BP_PORT(bp);
3545         int index;
3546         u32 reg_addr;
3547         u32 val;
3548         u32 aeu_mask;
3549
3550         /* need to take the HW lock because the MCP or the other port
3551            might also try to handle this event */
3552         bnx2x_acquire_alr(bp);
3553
3554         if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3555                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3556                 bnx2x_set_reset_in_progress(bp);
3557                 schedule_delayed_work(&bp->reset_task, 0);
3558                 /* Disable HW interrupts */
3559                 bnx2x_int_disable(bp);
3560                 bnx2x_release_alr(bp);
3561                 /* In case of parity errors don't handle attentions so that
3562                  * the other function would also "see" the parity errors.
3563                  */
3564                 return;
3565         }
3566
3567         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3568         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3569         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3570         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3571         if (CHIP_IS_E2(bp))
3572                 attn.sig[4] =
3573                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3574         else
3575                 attn.sig[4] = 0;
3576
3577         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3578            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3579
3580         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3581                 if (deasserted & (1 << index)) {
3582                         group_mask = &bp->attn_group[index];
3583
3584                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3585                                          "%08x %08x %08x\n",
3586                            index,
3587                            group_mask->sig[0], group_mask->sig[1],
3588                            group_mask->sig[2], group_mask->sig[3],
3589                            group_mask->sig[4]);
3590
3591                         bnx2x_attn_int_deasserted4(bp,
3592                                         attn.sig[4] & group_mask->sig[4]);
3593                         bnx2x_attn_int_deasserted3(bp,
3594                                         attn.sig[3] & group_mask->sig[3]);
3595                         bnx2x_attn_int_deasserted1(bp,
3596                                         attn.sig[1] & group_mask->sig[1]);
3597                         bnx2x_attn_int_deasserted2(bp,
3598                                         attn.sig[2] & group_mask->sig[2]);
3599                         bnx2x_attn_int_deasserted0(bp,
3600                                         attn.sig[0] & group_mask->sig[0]);
3601                 }
3602         }
3603
3604         bnx2x_release_alr(bp);
3605
3606         if (bp->common.int_block == INT_BLOCK_HC)
3607                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3608                             COMMAND_REG_ATTN_BITS_CLR);
3609         else
3610                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3611
3612         val = ~deasserted;
3613         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3614            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3615         REG_WR(bp, reg_addr, val);
3616
3617         if (~bp->attn_state & deasserted)
3618                 BNX2X_ERR("IGU ERROR\n");
3619
3620         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3621                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3622
3623         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3624         aeu_mask = REG_RD(bp, reg_addr);
3625
3626         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3627            aeu_mask, deasserted);
3628         aeu_mask |= (deasserted & 0x3ff);
3629         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3630
3631         REG_WR(bp, reg_addr, aeu_mask);
3632         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3633
3634         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3635         bp->attn_state &= ~deasserted;
3636         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3637 }
3638
3639 static void bnx2x_attn_int(struct bnx2x *bp)
3640 {
3641         /* read local copy of bits */
3642         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3643                                                                 attn_bits);
3644         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3645                                                                 attn_bits_ack);
3646         u32 attn_state = bp->attn_state;
3647
3648         /* look for changed bits */
3649         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3650         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3651
3652         DP(NETIF_MSG_HW,
3653            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3654            attn_bits, attn_ack, asserted, deasserted);
3655
3656         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3657                 BNX2X_ERR("BAD attention state\n");
3658
3659         /* handle bits that were raised */
3660         if (asserted)
3661                 bnx2x_attn_int_asserted(bp, asserted);
3662
3663         if (deasserted)
3664                 bnx2x_attn_int_deasserted(bp, deasserted);
3665 }
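
/* Worked example (illustration): with attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1, only bit 2 was newly raised:
 *
 *	asserted   =  0x5 & ~0x1 & ~0x1 = 0x4
 *	deasserted = ~0x5 &  0x1 &  0x1 = 0x0
 *
 * so only bnx2x_attn_int_asserted() runs on this pass.
 */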
3666
3667 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3668 {
3669         /* No memory barriers */
3670         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3671         mmiowb(); /* keep prod updates ordered */
3672 }
3673
3674 #ifdef BCM_CNIC
3675 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3676                                       union event_ring_elem *elem)
3677 {
3678         if (!bp->cnic_eth_dev.starting_cid  ||
3679             cid < bp->cnic_eth_dev.starting_cid)
3680                 return 1;
3681
3682         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3683
3684         if (unlikely(elem->message.data.cfc_del_event.error)) {
3685                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3686                           cid);
3687                 bnx2x_panic_dump(bp);
3688         }
3689         bnx2x_cnic_cfc_comp(bp, cid);
3690         return 0;
3691 }
3692 #endif
3693
3694 static void bnx2x_eq_int(struct bnx2x *bp)
3695 {
3696         u16 hw_cons, sw_cons, sw_prod;
3697         union event_ring_elem *elem;
3698         u32 cid;
3699         u8 opcode;
3700         int spqe_cnt = 0;
3701
3702         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3703
3704         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3705          * When we get the next-page element we need to adjust hw_cons so
3706          * the loop condition below will be met. The next element is the
3707          * size of a regular element and hence we increment by 1.
3708          */
3709         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3710                 hw_cons++;
3711
3712         /* This function may never run in parallel with itself for a
3713          * specific bp, thus there is no need for a "paired" read memory
3714          * barrier here.
3715          */
3716         sw_cons = bp->eq_cons;
3717         sw_prod = bp->eq_prod;
3718
3719         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %u\n",
3720                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3721
3722         for (; sw_cons != hw_cons;
3723               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3724
3726                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3727
3728                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3729                 opcode = elem->message.opcode;
3730
3732                 /* handle eq element */
3733                 switch (opcode) {
3734                 case EVENT_RING_OPCODE_STAT_QUERY:
3735                         DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3736                         /* nothing to do with stats comp */
3737                         continue;
3738
3739                 case EVENT_RING_OPCODE_CFC_DEL:
3740                         /* handle according to cid range */
3741                         /*
3742                          * we may want to verify here that the bp state is
3743                          * HALTING
3744                          */
3745                         DP(NETIF_MSG_IFDOWN,
3746                            "got delete ramrod for MULTI[%d]\n", cid);
3747 #ifdef BCM_CNIC
3748                         if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3749                                 goto next_spqe;
3750                         if (cid == BNX2X_FCOE_ETH_CID)
3751                                 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3752                         else
3753 #endif
3754                                 bnx2x_fp(bp, cid, state) =
3755                                                 BNX2X_FP_STATE_CLOSED;
3756
3757                         goto next_spqe;
3758
3759                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3760                         DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3761                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3762                         goto next_spqe;
3763                 case EVENT_RING_OPCODE_START_TRAFFIC:
3764                         DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3765                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3766                         goto next_spqe;
3767                 }
3768
3769                 switch (opcode | bp->state) {
3770                 case (EVENT_RING_OPCODE_FUNCTION_START |
3771                       BNX2X_STATE_OPENING_WAIT4_PORT):
3772                         DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3773                         bp->state = BNX2X_STATE_FUNC_STARTED;
3774                         break;
3775
3776                 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3777                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3778                         DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3779                         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3780                         break;
3781
3782                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3783                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3784                         DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3785                         if (elem->message.data.set_mac_event.echo)
3786                                 bp->set_mac_pending = 0;
3787                         break;
3788
3789                 case (EVENT_RING_OPCODE_SET_MAC |
3790                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3791                         DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3792                         if (elem->message.data.set_mac_event.echo)
3793                                 bp->set_mac_pending = 0;
3794                         break;
3795                 default:
3796                         /* unknown event - log an error and continue */
3797                         BNX2X_ERR("Unknown EQ event %d\n",
3798                                   elem->message.opcode);
3799                 }
3800 next_spqe:
3801                 spqe_cnt++;
3802         } /* for */
3803
3804         smp_mb__before_atomic_inc();
3805         atomic_add(spqe_cnt, &bp->eq_spq_left);
3806
3807         bp->eq_cons = sw_cons;
3808         bp->eq_prod = sw_prod;
3809         /* Make sure the above memory writes complete before the producer update */
3810         smp_wmb();
3811
3812         /* update producer */
3813         bnx2x_update_eq_prod(bp, bp->eq_prod);
3814 }
3815
3816 static void bnx2x_sp_task(struct work_struct *work)
3817 {
3818         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3819         u16 status;
3820
3821         /* Return here if interrupt is disabled */
3822         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3823                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3824                 return;
3825         }
3826
3827         status = bnx2x_update_dsb_idx(bp);
3828 /*      if (status == 0)                                     */
3829 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3830
3831         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3832
3833         /* HW attentions */
3834         if (status & BNX2X_DEF_SB_ATT_IDX) {
3835                 bnx2x_attn_int(bp);
3836                 status &= ~BNX2X_DEF_SB_ATT_IDX;
3837         }
3838
3839         /* SP events: STAT_QUERY and others */
3840         if (status & BNX2X_DEF_SB_IDX) {
3841 #ifdef BCM_CNIC
3842                 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
3843
3844                 if ((!NO_FCOE(bp)) &&
3845                         (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3846                         napi_schedule(&bnx2x_fcoe(bp, napi));
3847 #endif
3848                 /* Handle EQ completions */
3849                 bnx2x_eq_int(bp);
3850
3851                 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3852                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3853
3854                 status &= ~BNX2X_DEF_SB_IDX;
3855         }
3856
3857         if (unlikely(status))
3858                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3859                    status);
3860
3861         bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3862              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3863 }
3864
3865 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3866 {
3867         struct net_device *dev = dev_instance;
3868         struct bnx2x *bp = netdev_priv(dev);
3869
3870         /* Return here if interrupt is disabled */
3871         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3872                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3873                 return IRQ_HANDLED;
3874         }
3875
3876         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3877                      IGU_INT_DISABLE, 0);
3878
3879 #ifdef BNX2X_STOP_ON_ERROR
3880         if (unlikely(bp->panic))
3881                 return IRQ_HANDLED;
3882 #endif
3883
3884 #ifdef BCM_CNIC
3885         {
3886                 struct cnic_ops *c_ops;
3887
3888                 rcu_read_lock();
3889                 c_ops = rcu_dereference(bp->cnic_ops);
3890                 if (c_ops)
3891                         c_ops->cnic_handler(bp->cnic_data, NULL);
3892                 rcu_read_unlock();
3893         }
3894 #endif
3895         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3896
3897         return IRQ_HANDLED;
3898 }
3899
3900 /* end of slow path */
3901
3902 static void bnx2x_timer(unsigned long data)
3903 {
3904         struct bnx2x *bp = (struct bnx2x *) data;
3905
3906         if (!netif_running(bp->dev))
3907                 return;
3908
3909         if (atomic_read(&bp->intr_sem) != 0)
3910                 goto timer_restart;
3911
3912         if (poll) {
3913                 struct bnx2x_fastpath *fp = &bp->fp[0];
3915
3916                 bnx2x_tx_int(fp);
3917                 bnx2x_rx_int(fp, 1000);
3918         }
3919
3920         if (!BP_NOMCP(bp)) {
3921                 int mb_idx = BP_FW_MB_IDX(bp);
3922                 u32 drv_pulse;
3923                 u32 mcp_pulse;
3924
3925                 ++bp->fw_drv_pulse_wr_seq;
3926                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3927                 /* TBD - add SYSTEM_TIME */
3928                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3929                 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3930
3931                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3932                              MCP_PULSE_SEQ_MASK);
3933                 /* The delta between driver pulse and mcp response
3934                  * should be 1 (before mcp response) or 0 (after mcp response)
3935                  */
3936                 if ((drv_pulse != mcp_pulse) &&
3937                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3938                         /* someone lost a heartbeat... */
3939                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3940                                   drv_pulse, mcp_pulse);
3941                 }
3942         }
3943
3944         if (bp->state == BNX2X_STATE_OPEN)
3945                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3946
3947 timer_restart:
3948         mod_timer(&bp->timer, jiffies + bp->current_interval);
3949 }
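
/* Heartbeat example (illustration): if the driver just wrote drv_pulse
 * 0x12, a healthy MCP reads back mcp_pulse 0x12 (already answered,
 * delta 0) or 0x11 (answer pending, delta 1); any other value triggers
 * the "lost a heartbeat" error above.
 */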
3950
3951 /* end of Statistics */
3952
3953 /* nic init */
3954
3955 /*
3956  * nic init service functions
3957  */
3958
3959 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3960 {
3961         u32 i;
3962         if (!(len%4) && !(addr%4))
3963                 for (i = 0; i < len; i += 4)
3964                         REG_WR(bp, addr + i, fill);
3965         else
3966                 for (i = 0; i < len; i++)
3967                         REG_WR8(bp, addr + i, fill);
3969 }
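
/* Example: bnx2x_fill(bp, base, 0, 16) with a dword-aligned base issues
 * four 32-bit writes of zero, while an unaligned base or length falls
 * back to len byte-wide REG_WR8() writes.
 */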
3970
3971 /* helper: writes FP SP data to FW - data_size in dwords */
3972 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3973                                        int fw_sb_id,
3974                                        u32 *sb_data_p,
3975                                        u32 data_size)
3976 {
3977         int index;
3978         for (index = 0; index < data_size; index++)
3979                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3980                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3981                         sizeof(u32)*index,
3982                         *(sb_data_p + index));
3983 }
3984
3985 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3986 {
3987         u32 *sb_data_p;
3988         u32 data_size = 0;
3989         struct hc_status_block_data_e2 sb_data_e2;
3990         struct hc_status_block_data_e1x sb_data_e1x;
3991
3992         /* disable the function first */
3993         if (CHIP_IS_E2(bp)) {
3994                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3995                 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3996                 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3997                 sb_data_e2.common.p_func.vf_valid = false;
3998                 sb_data_p = (u32 *)&sb_data_e2;
3999                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4000         } else {
4001                 memset(&sb_data_e1x, 0,
4002                        sizeof(struct hc_status_block_data_e1x));
4003                 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
4004                 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
4005                 sb_data_e1x.common.p_func.vf_valid = false;
4006                 sb_data_p = (u32 *)&sb_data_e1x;
4007                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4008         }
4009         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4010
4011         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4012                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
4013                         CSTORM_STATUS_BLOCK_SIZE);
4014         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4015                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
4016                         CSTORM_SYNC_BLOCK_SIZE);
4017 }
4018
4019 /* helper:  writes SP SB data to FW */
4020 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4021                 struct hc_sp_status_block_data *sp_sb_data)
4022 {
4023         int func = BP_FUNC(bp);
4024         int i;
4025         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4026                 REG_WR(bp, BAR_CSTRORM_INTMEM +
4027                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4028                         i*sizeof(u32),
4029                         *((u32 *)sp_sb_data + i));
4030 }
4031
4032 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4033 {
4034         int func = BP_FUNC(bp);
4035         struct hc_sp_status_block_data sp_sb_data;
4036         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4037
4038         sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4039         sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4040         sp_sb_data.p_func.vf_valid = false;
4041
4042         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4043
4044         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4045                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4046                         CSTORM_SP_STATUS_BLOCK_SIZE);
4047         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4048                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4049                         CSTORM_SP_SYNC_BLOCK_SIZE);
4050
4051 }
4052
4053
4054 static inline
4055 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4056                                            int igu_sb_id, int igu_seg_id)
4057 {
4058         hc_sm->igu_sb_id = igu_sb_id;
4059         hc_sm->igu_seg_id = igu_seg_id;
4060         hc_sm->timer_value = 0xFF;
4061         hc_sm->time_to_expire = 0xFFFFFFFF;
4062 }
4063
4064 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4065                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
4066 {
4067         int igu_seg_id;
4068
4069         struct hc_status_block_data_e2 sb_data_e2;
4070         struct hc_status_block_data_e1x sb_data_e1x;
4071         struct hc_status_block_sm  *hc_sm_p;
4072         struct hc_index_data *hc_index_p;
4073         int data_size;
4074         u32 *sb_data_p;
4075
4076         if (CHIP_INT_MODE_IS_BC(bp))
4077                 igu_seg_id = HC_SEG_ACCESS_NORM;
4078         else
4079                 igu_seg_id = IGU_SEG_ACCESS_NORM;
4080
4081         bnx2x_zero_fp_sb(bp, fw_sb_id);
4082
4083         if (CHIP_IS_E2(bp)) {
4084                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4085                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4086                 sb_data_e2.common.p_func.vf_id = vfid;
4087                 sb_data_e2.common.p_func.vf_valid = vf_valid;
4088                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4089                 sb_data_e2.common.same_igu_sb_1b = true;
4090                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4091                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4092                 hc_sm_p = sb_data_e2.common.state_machine;
4093                 hc_index_p = sb_data_e2.index_data;
4094                 sb_data_p = (u32 *)&sb_data_e2;
4095                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4096         } else {
4097                 memset(&sb_data_e1x, 0,
4098                        sizeof(struct hc_status_block_data_e1x));
4099                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4100                 sb_data_e1x.common.p_func.vf_id = 0xff;
4101                 sb_data_e1x.common.p_func.vf_valid = false;
4102                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4103                 sb_data_e1x.common.same_igu_sb_1b = true;
4104                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4105                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4106                 hc_sm_p = sb_data_e1x.common.state_machine;
4107                 hc_index_p = sb_data_e1x.index_data;
4108                 sb_data_p = (u32 *)&sb_data_e1x;
4109                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4110         }
4111
4112         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4113                                        igu_sb_id, igu_seg_id);
4114         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4115                                        igu_sb_id, igu_seg_id);
4116
4117         DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4118
4119         /* write indices to HW */
4120         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4121 }
4122
4123 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4124                                         u8 sb_index, u8 disable, u16 usec)
4125 {
4126         int port = BP_PORT(bp);
4127         u8 ticks = usec / BNX2X_BTR;
4128
4129         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4130
4131         disable = disable ? 1 : (usec ? 0 : 1);
4132         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4133 }
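
/* Worked example (assuming BNX2X_BTR is 4, its value in bnx2x.h at the
 * time of writing): usec = 50 gives ticks = 50 / 4 = 12, i.e. the HC
 * timeout is programmed in BNX2X_BTR-microsecond units.  Note that
 * usec == 0 with disable == 0 still disables the index, per the
 * "disable ? 1 : (usec ? 0 : 1)" expression above.
 */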
4134
4135 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4136                                      u16 tx_usec, u16 rx_usec)
4137 {
4138         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4139                                     false, rx_usec);
4140         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4141                                     false, tx_usec);
4142 }
4143
4144 static void bnx2x_init_def_sb(struct bnx2x *bp)
4145 {
4146         struct host_sp_status_block *def_sb = bp->def_status_blk;
4147         dma_addr_t mapping = bp->def_status_blk_mapping;
4148         int igu_sp_sb_index;
4149         int igu_seg_id;
4150         int port = BP_PORT(bp);
4151         int func = BP_FUNC(bp);
4152         int reg_offset;
4153         u64 section;
4154         int index;
4155         struct hc_sp_status_block_data sp_sb_data;
4156         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4157
4158         if (CHIP_INT_MODE_IS_BC(bp)) {
4159                 igu_sp_sb_index = DEF_SB_IGU_ID;
4160                 igu_seg_id = HC_SEG_ACCESS_DEF;
4161         } else {
4162                 igu_sp_sb_index = bp->igu_dsb_id;
4163                 igu_seg_id = IGU_SEG_ACCESS_DEF;
4164         }
4165
4166         /* ATTN */
4167         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4168                                             atten_status_block);
4169         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4170
4171         bp->attn_state = 0;
4172
4173         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4174                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4175         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4176                 int sindex;
4177                 /* take care of sig[0]..sig[3]; sig[4] is handled below */
4178                 for (sindex = 0; sindex < 4; sindex++)
4179                         bp->attn_group[index].sig[sindex] =
4180                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4181
4182                 if (CHIP_IS_E2(bp))
4183                         /*
4184                          * enable5 is separate from the rest of the registers,
4185                          * and therefore the address skip is 4
4186                          * and not 16 between the different groups
4187                          */
4188                         bp->attn_group[index].sig[4] = REG_RD(bp,
4189                                         reg_offset + 0x10 + 0x4*index);
4190                 else
4191                         bp->attn_group[index].sig[4] = 0;
4192         }
4193
4194         if (bp->common.int_block == INT_BLOCK_HC) {
4195                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4196                                      HC_REG_ATTN_MSG0_ADDR_L);
4197
4198                 REG_WR(bp, reg_offset, U64_LO(section));
4199                 REG_WR(bp, reg_offset + 4, U64_HI(section));
4200         } else if (CHIP_IS_E2(bp)) {
4201                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4202                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4203         }
4204
4205         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4206                                             sp_sb);
4207
4208         bnx2x_zero_sp_sb(bp);
4209
4210         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
4211         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
4212         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
4213         sp_sb_data.igu_seg_id           = igu_seg_id;
4214         sp_sb_data.p_func.pf_id         = func;
4215         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
4216         sp_sb_data.p_func.vf_id         = 0xff;
4217
4218         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4219
4220         bp->stats_pending = 0;
4221         bp->set_mac_pending = 0;
4222
4223         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4224 }
4225
4226 void bnx2x_update_coalesce(struct bnx2x *bp)
4227 {
4228         int i;
4229
4230         for_each_eth_queue(bp, i)
4231                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4232                                          bp->tx_ticks, bp->rx_ticks);
4233 }
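
/* Typical caller (sketch, not the only path): the ethtool set_coalesce
 * handler stores the requested rx/tx usecs into bp->rx_ticks/bp->tx_ticks
 * and then calls this function so every ethernet queue picks them up:
 *
 *	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
 *	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
 *	if (netif_running(bp->dev))
 *		bnx2x_update_coalesce(bp);
 */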
4234
4235 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4236 {
4237         spin_lock_init(&bp->spq_lock);
4238         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
4239
4240         bp->spq_prod_idx = 0;
4241         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4242         bp->spq_prod_bd = bp->spq;
4243         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4244 }
4245
4246 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4247 {
4248         int i;
4249         for (i = 1; i <= NUM_EQ_PAGES; i++) {
4250                 union event_ring_elem *elem =
4251                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4252
4253                 elem->next_page.addr.hi =
4254                         cpu_to_le32(U64_HI(bp->eq_mapping +
4255                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4256                 elem->next_page.addr.lo =
4257                         cpu_to_le32(U64_LO(bp->eq_mapping +
4258                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4259         }
4260         bp->eq_cons = 0;
4261         bp->eq_prod = NUM_EQ_DESC;
4262         bp->eq_cons_sb = BNX2X_EQ_INDEX;
4263         /* we want a warning message before it gets rough... */
4264         atomic_set(&bp->eq_spq_left,
4265                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4266 }
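
/* Worked example of the chaining above (illustrative): with
 * NUM_EQ_PAGES == 2, iteration i == 1 makes the last element of page 0
 * point at bp->eq_mapping + BCM_PAGE_SIZE (page 1), and i == 2 makes the
 * last element of page 1 point back at bp->eq_mapping (page 0), closing
 * the event ring.
 */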
4267
4268 void bnx2x_push_indir_table(struct bnx2x *bp)
4269 {
4270         int func = BP_FUNC(bp);
4271         int i;
4272
4273         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4274                 return;
4275
4276         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4277                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4278                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4279                         bp->fp->cl_id + bp->rx_indir_table[i]);
4280 }
4281
4282 static void bnx2x_init_ind_table(struct bnx2x *bp)
4283 {
4284         int i;
4285
4286         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4287                 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4288
4289         bnx2x_push_indir_table(bp);
4290 }
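
/* Worked example (illustrative): with BNX2X_NUM_ETH_QUEUES(bp) == 4 the
 * table becomes 0,1,2,3,0,1,2,3,... and bnx2x_push_indir_table() then
 * writes cl_id+0..cl_id+3 in that round-robin pattern, spreading RSS hash
 * buckets evenly over the ethernet queues.
 */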
4291
4292 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4293 {
4294         int mode = bp->rx_mode;
4295         int port = BP_PORT(bp);
4296         u16 cl_id;
4297         u32 def_q_filters = 0;
4298
4299         /* All but management unicast packets should pass to the host as well */
4300         u32 llh_mask =
4301                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4302                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4303                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4304                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4305
4306         switch (mode) {
4307         case BNX2X_RX_MODE_NONE: /* no Rx */
4308                 def_q_filters = BNX2X_ACCEPT_NONE;
4309 #ifdef BCM_CNIC
4310                 if (!NO_FCOE(bp)) {
4311                         cl_id = bnx2x_fcoe(bp, cl_id);
4312                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4313                 }
4314 #endif
4315                 break;
4316
4317         case BNX2X_RX_MODE_NORMAL:
4318                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4319                                 BNX2X_ACCEPT_MULTICAST;
4320 #ifdef BCM_CNIC
4321                 if (!NO_FCOE(bp)) {
4322                         cl_id = bnx2x_fcoe(bp, cl_id);
4323                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4324                                                   BNX2X_ACCEPT_UNICAST |
4325                                                   BNX2X_ACCEPT_MULTICAST);
4326                 }
4327 #endif
4328                 break;
4329
4330         case BNX2X_RX_MODE_ALLMULTI:
4331                 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4332                                 BNX2X_ACCEPT_ALL_MULTICAST;
4333 #ifdef BCM_CNIC
4334                 /*
4335                  *  Prevent duplication of multicast packets by configuring FCoE
4336                  *  L2 Client to receive only matched unicast frames.
4337                  */
4338                 if (!NO_FCOE(bp)) {
4339                         cl_id = bnx2x_fcoe(bp, cl_id);
4340                         bnx2x_rxq_set_mac_filters(bp, cl_id,
4341                                                   BNX2X_ACCEPT_UNICAST);
4342                 }
4343 #endif
4344                 break;
4345
4346         case BNX2X_RX_MODE_PROMISC:
4347                 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4348 #ifdef BCM_CNIC
4349                 /*
4350                  *  Prevent packet duplication by configuring DROP_ALL for the FCoE
4351                  *  L2 Client.
4352                  */
4353                 if (!NO_FCOE(bp)) {
4354                         cl_id = bnx2x_fcoe(bp, cl_id);
4355                         bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4356                 }
4357 #endif
4358                 /* pass management unicast packets as well */
4359                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4360                 break;
4361
4362         default:
4363                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4364                 break;
4365         }
4366
4367         cl_id = BP_L_ID(bp);
4368         bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4369
4370         REG_WR(bp,
4371                (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4372                        NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
4373
4374         DP(NETIF_MSG_IFUP, "rx mode %d\n"
4375                 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4376                 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4377                 "unmatched_ucast 0x%x\n", mode,
4378                 bp->mac_filters.ucast_drop_all,
4379                 bp->mac_filters.mcast_drop_all,
4380                 bp->mac_filters.bcast_drop_all,
4381                 bp->mac_filters.ucast_accept_all,
4382                 bp->mac_filters.mcast_accept_all,
4383                 bp->mac_filters.bcast_accept_all,
4384                 bp->mac_filters.unmatched_unicast
4385         );
4386
4387         storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4388 }
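
/* Typical caller (sketch): the ndo_set_rx_mode handler derives bp->rx_mode
 * from dev->flags (IFF_PROMISC -> BNX2X_RX_MODE_PROMISC, IFF_ALLMULTI ->
 * BNX2X_RX_MODE_ALLMULTI, otherwise NORMAL/NONE) and then calls
 * bnx2x_set_storm_rx_mode(bp) to push the resulting filters to the storms.
 */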
4389
4390 static void bnx2x_init_internal_common(struct bnx2x *bp)
4391 {
4392         int i;
4393
4394         if (!CHIP_IS_E1(bp)) {
4395
4396                 /* xstorm needs to know whether to add ovlan to packets or not,
4397                  * in switch-independent mode we'll write 0 here... */
4398                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4399                         bp->mf_mode);
4400                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4401                         bp->mf_mode);
4402                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4403                         bp->mf_mode);
4404                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4405                         bp->mf_mode);
4406         }
4407
4408         if (IS_MF_SI(bp))
4409                 /*
4410                  * In switch independent mode, the TSTORM needs to accept
4411                  * packets that failed classification, since approximate match
4412                  * mac addresses aren't written to NIG LLH
4413                  */
4414                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4415                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4416
4417         /* Zero this manually as its initialization is
4418            currently missing in the initTool */
4419         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4420                 REG_WR(bp, BAR_USTRORM_INTMEM +
4421                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4422         if (CHIP_IS_E2(bp)) {
4423                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4424                         CHIP_INT_MODE_IS_BC(bp) ?
4425                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4426         }
4427 }
4428
4429 static void bnx2x_init_internal_port(struct bnx2x *bp)
4430 {
4431         /* port */
4432         bnx2x_dcb_init_intmem_pfc(bp);
4433 }
4434
4435 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4436 {
4437         switch (load_code) {
4438         case FW_MSG_CODE_DRV_LOAD_COMMON:
4439         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4440                 bnx2x_init_internal_common(bp);
4441                 /* no break */
4442
4443         case FW_MSG_CODE_DRV_LOAD_PORT:
4444                 bnx2x_init_internal_port(bp);
4445                 /* no break */
4446
4447         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4448                 /* internal memory per function is
4449                    initialized inside bnx2x_pf_init */
4450                 break;
4451
4452         default:
4453                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4454                 break;
4455         }
4456 }
4457
4458 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4459 {
4460         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4461
4462         fp->state = BNX2X_FP_STATE_CLOSED;
4463
4464         fp->index = fp->cid = fp_idx;
4465         fp->cl_id = BP_L_ID(bp) + fp_idx;
4466         fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4467         fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4468         /* qZone id equals to FW (per path) client id */
4469         fp->cl_qzone_id = fp->cl_id +
4470                            BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4471                                 ETH_MAX_RX_CLIENTS_E1H);
4472         /* init shortcut */
4473         fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4474                             USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4475                             USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4476         /* Setup SB indices */
4477         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4478         fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4479
4480         DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
4481                                    "cl_id %d  fw_sb %d  igu_sb %d\n",
4482                    fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4483                    fp->igu_sb_id);
4484         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4485                       fp->fw_sb_id, fp->igu_sb_id);
4486
4487         bnx2x_update_fpsb_idx(fp);
4488 }
4489
4490 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4491 {
4492         int i;
4493
4494         for_each_eth_queue(bp, i)
4495                 bnx2x_init_fp_sb(bp, i);
4496 #ifdef BCM_CNIC
4497         if (!NO_FCOE(bp))
4498                 bnx2x_init_fcoe_fp(bp);
4499
4500         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4501                       BNX2X_VF_ID_INVALID, false,
4502                       CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4503
4504 #endif
4505
4506         /* ensure status block indices were read */
4507         rmb();
4508
4509         bnx2x_init_def_sb(bp);
4510         bnx2x_update_dsb_idx(bp);
4511         bnx2x_init_rx_rings(bp);
4512         bnx2x_init_tx_rings(bp);
4513         bnx2x_init_sp_ring(bp);
4514         bnx2x_init_eq_ring(bp);
4515         bnx2x_init_internal(bp, load_code);
4516         bnx2x_pf_init(bp);
4517         bnx2x_init_ind_table(bp);
4518         bnx2x_stats_init(bp);
4519
4520         /* At this point, we are ready for interrupts */
4521         atomic_set(&bp->intr_sem, 0);
4522
4523         /* flush all before enabling interrupts */
4524         mb();
4525         mmiowb();
4526
4527         bnx2x_int_enable(bp);
4528
4529         /* Check for SPIO5 */
4530         bnx2x_attn_int_deasserted0(bp,
4531                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4532                                    AEU_INPUTS_ATTN_BITS_SPIO5);
4533 }
4534
4535 /* end of nic init */
4536
4537 /*
4538  * gzip service functions
4539  */
4540
4541 static int bnx2x_gunzip_init(struct bnx2x *bp)
4542 {
4543         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4544                                             &bp->gunzip_mapping, GFP_KERNEL);
4545         if (bp->gunzip_buf == NULL)
4546                 goto gunzip_nomem1;
4547
4548         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4549         if (bp->strm == NULL)
4550                 goto gunzip_nomem2;
4551
4552         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4553                                       GFP_KERNEL);
4554         if (bp->strm->workspace == NULL)
4555                 goto gunzip_nomem3;
4556
4557         return 0;
4558
4559 gunzip_nomem3:
4560         kfree(bp->strm);
4561         bp->strm = NULL;
4562
4563 gunzip_nomem2:
4564         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4565                           bp->gunzip_mapping);
4566         bp->gunzip_buf = NULL;
4567
4568 gunzip_nomem1:
4569         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4570                " decompression\n");
4571         return -ENOMEM;
4572 }
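
/* The gotos above implement the usual kernel unwind idiom: each allocation
 * failure jumps to a label that frees everything allocated before it, so
 * the function returns either 0 with all three resources held or -ENOMEM
 * with none.
 */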
4573
4574 static void bnx2x_gunzip_end(struct bnx2x *bp)
4575 {
4576         kfree(bp->strm->workspace);
4577         kfree(bp->strm);
4578         bp->strm = NULL;
4579
4580         if (bp->gunzip_buf) {
4581                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4582                                   bp->gunzip_mapping);
4583                 bp->gunzip_buf = NULL;
4584         }
4585 }
4586
4587 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4588 {
4589         int n, rc;
4590
4591         /* check gzip header */
4592         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4593                 BNX2X_ERR("Bad gzip header\n");
4594                 return -EINVAL;
4595         }
4596
4597         n = 10;
4598
4599 #define FNAME                           0x8
4600
4601         if (zbuf[3] & FNAME)
4602                 while ((zbuf[n++] != 0) && (n < len));
4603
4604         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4605         bp->strm->avail_in = len - n;
4606         bp->strm->next_out = bp->gunzip_buf;
4607         bp->strm->avail_out = FW_BUF_SIZE;
4608
4609         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4610         if (rc != Z_OK)
4611                 return rc;
4612
4613         rc = zlib_inflate(bp->strm, Z_FINISH);
4614         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4615                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4616                            bp->strm->msg);
4617
4618         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4619         if (bp->gunzip_outlen & 0x3)
4620                 netdev_err(bp->dev, "Firmware decompression error:"
4621                                     " gunzip_outlen (%d) not aligned\n",
4622                                 bp->gunzip_outlen);
4623         bp->gunzip_outlen >>= 2;
4624
4625         zlib_inflateEnd(bp->strm);
4626
4627         if (rc == Z_STREAM_END)
4628                 return 0;
4629
4630         return rc;
4631 }
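
/* Worked example of the header handling above (illustrative): a gzip
 * stream starts 1f 8b 08 <FLG> ... with a fixed 10-byte header; when FLG
 * has the FNAME bit (0x8) set, a NUL-terminated file name follows, so the
 * while loop advances n past it before handing the raw deflate payload
 * (hence the -MAX_WBITS window-bits argument) to zlib_inflate().
 */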
4632
4633 /* nic load/unload */
4634
4635 /*
4636  * General service functions
4637  */
4638
4639 /* send a NIG loopback debug packet */
4640 static void bnx2x_lb_pckt(struct bnx2x *bp)
4641 {
4642         u32 wb_write[3];
4643
4644         /* Ethernet source and destination addresses */
4645         wb_write[0] = 0x55555555;
4646         wb_write[1] = 0x55555555;
4647         wb_write[2] = 0x20;             /* SOP */
4648         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4649
4650         /* NON-IP protocol */
4651         wb_write[0] = 0x09000000;
4652         wb_write[1] = 0x55555555;
4653         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4654         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4655 }
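
/* Illustrative note: each REG_WR_DMAE above pushes a 3-dword chunk of the
 * debug packet into the NIG loopback interface; wb_write[0]/[1] carry
 * packet data (the 0x55... address pattern, then a non-IP ethertype) and
 * wb_write[2] carries control flags, 0x20 marking SOP and 0x10 EOP, so
 * the two writes together emit one minimal frame.
 */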
4656
4657 /* some of the internal memories
4658  * are not directly readable from the driver;
4659  * to test them we send debug packets
4660  */
4661 static int bnx2x_int_mem_test(struct bnx2x *bp)
4662 {
4663         int factor;
4664         int count, i;
4665         u32 val = 0;
4666
4667         if (CHIP_REV_IS_FPGA(bp))
4668                 factor = 120;
4669         else if (CHIP_REV_IS_EMUL(bp))
4670                 factor = 200;
4671         else
4672                 factor = 1;
4673
4674         /* Disable inputs of parser neighbor blocks */
4675         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4676         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4677         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4678         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4679
4680         /* Write 0 to parser credits for CFC search request */
4681         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4682
4683         /* send Ethernet packet */
4684         bnx2x_lb_pckt(bp);
4685
4686         /* TODO: do we need to reset the NIG statistics? */
4687         /* Wait until NIG register shows 1 packet of size 0x10 */
4688         count = 1000 * factor;
4689         while (count) {
4690
4691                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4692                 val = *bnx2x_sp(bp, wb_data[0]);
4693                 if (val == 0x10)
4694                         break;
4695
4696                 msleep(10);
4697                 count--;
4698         }
4699         if (val != 0x10) {
4700                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4701                 return -1;
4702         }
4703
4704         /* Wait until PRS register shows 1 packet */
4705         count = 1000 * factor;
4706         while (count) {
4707                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4708                 if (val == 1)
4709                         break;
4710
4711                 msleep(10);
4712                 count--;
4713         }
4714         if (val != 0x1) {
4715                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4716                 return -2;
4717         }
4718
4719         /* Reset and init BRB, PRS */
4720         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4721         msleep(50);
4722         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4723         msleep(50);
4724         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4725         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4726
4727         DP(NETIF_MSG_HW, "part2\n");
4728
4729         /* Disable inputs of parser neighbor blocks */
4730         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4731         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4732         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4733         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4734
4735         /* Write 0 to parser credits for CFC search request */
4736         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4737
4738         /* send 10 Ethernet packets */
4739         for (i = 0; i < 10; i++)
4740                 bnx2x_lb_pckt(bp);
4741
4742         /* Wait until NIG register shows 10 + 1
4743            packets with a total size of 11*0x10 = 0xb0 */
4744         count = 1000 * factor;
4745         while (count) {
4746
4747                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4748                 val = *bnx2x_sp(bp, wb_data[0]);
4749                 if (val == 0xb0)
4750                         break;
4751
4752                 msleep(10);
4753                 count--;
4754         }
4755         if (val != 0xb0) {
4756                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4757                 return -3;
4758         }
4759
4760         /* PRS register should now show 2 packets */
4761         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4762         if (val != 2)
4763                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4764
4765         /* Write 1 to parser credits for CFC search request */
4766         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4767
4768         /* Give the parser time to consume the restored credit */
4769         msleep(10 * factor);
4770         /* PRS register should now show 3 packets */
4771         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4772         if (val != 3)
4773                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4774
4775         /* clear NIG EOP FIFO */
4776         for (i = 0; i < 11; i++)
4777                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4778         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4779         if (val != 1) {
4780                 BNX2X_ERR("clear of NIG failed\n");
4781                 return -4;
4782         }
4783
4784         /* Reset and init BRB, PRS, NIG */
4785         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4786         msleep(50);
4787         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4788         msleep(50);
4789         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4790         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4791 #ifndef BCM_CNIC
4792         /* set NIC mode */
4793         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4794 #endif
4795
4796         /* Enable inputs of parser neighbor blocks */
4797         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4798         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4799         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4800         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4801
4802         DP(NETIF_MSG_HW, "done\n");
4803
4804         return 0; /* OK */
4805 }
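
/* Worked numbers for the test above (from the constants in the code): each
 * loopback packet is counted as 0x10 octets in NIG_REG_STAT2_BRB_OCTET, so
 * the single packet yields 0x10 and the 10-packet burst shows up as
 * 10 + 1 packets, 11 * 0x10 = 0xb0 octets.  With factor == 1 each polling
 * loop waits at most 1000 * 10 ms = 10 s before declaring a timeout.
 */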
4806
4807 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4808 {
4809         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4810         if (CHIP_IS_E2(bp))
4811                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4812         else
4813                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4814         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4815         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4816         /*
4817          * mask read length error interrupts in brb for parser
4818          * (parsing unit and 'checksum and crc' unit)
4819          * these errors are legal (PU reads fixed length and CAC can cause
4820          * read length error on truncated packets)
4821          */
4822         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4823         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4824         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4825         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4826         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4827         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4828 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4829 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4830         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4831         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4832         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4833 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4834 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4835         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4836         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4837         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4838         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4839 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4840 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4841
4842         if (CHIP_REV_IS_FPGA(bp))
4843                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4844         else if (CHIP_IS_E2(bp))
4845                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4846                            (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4847                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4848                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4849                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4850                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4851         else
4852                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4853         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4854         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4855         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4856 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4857 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4858         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4859         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4860 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4861         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
4862 }
4863
4864 static void bnx2x_reset_common(struct bnx2x *bp)
4865 {
4866         /* reset_common */
4867         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4868                0xd3ffff7f);
4869         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4870 }
4871
4872 static void bnx2x_init_pxp(struct bnx2x *bp)
4873 {
4874         u16 devctl;
4875         int r_order, w_order;
4876
4877         pci_read_config_word(bp->pdev,
4878                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4879         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4880         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4881         if (bp->mrrs == -1)
4882                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4883         else {
4884                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4885                 r_order = bp->mrrs;
4886         }
4887
4888         bnx2x_init_pxp_arb(bp, r_order, w_order);
4889 }
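
/* Worked example of the decoding above (assuming the standard PCIe DEVCTL
 * encodings): PCI_EXP_DEVCTL_PAYLOAD occupies bits 7:5 and
 * PCI_EXP_DEVCTL_READRQ bits 14:12, each encoding 128 << N bytes.  A
 * devctl of 0x2810 thus gives w_order 0 (128-byte max payload) and
 * r_order 2 (512-byte max read request), which bnx2x_init_pxp_arb() uses
 * to program the PXP request arbiter.
 */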
4890
4891 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4892 {
4893         int is_required;
4894         u32 val;
4895         int port;
4896
4897         if (BP_NOMCP(bp))
4898                 return;
4899
4900         is_required = 0;
4901         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4902               SHARED_HW_CFG_FAN_FAILURE_MASK;
4903
4904         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4905                 is_required = 1;
4906
4907         /*
4908          * The fan failure mechanism is usually related to the PHY type since
4909          * the power consumption of the board is affected by the PHY. Currently,
4910          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4911          */
4912         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4913                 for (port = PORT_0; port < PORT_MAX; port++) {
4914                         is_required |=
4915                                 bnx2x_fan_failure_det_req(
4916                                         bp,
4917                                         bp->common.shmem_base,
4918                                         bp->common.shmem2_base,
4919                                         port);
4920                 }
4921
4922         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4923
4924         if (is_required == 0)
4925                 return;
4926
4927         /* Fan failure is indicated by SPIO 5 */
4928         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4929                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
4930
4931         /* set to active low mode */
4932         val = REG_RD(bp, MISC_REG_SPIO_INT);
4933         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4934                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4935         REG_WR(bp, MISC_REG_SPIO_INT, val);
4936
4937         /* enable interrupt to signal the IGU */
4938         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4939         val |= (1 << MISC_REGISTERS_SPIO_5);
4940         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4941 }
4942
4943 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4944 {
4945         u32 offset = 0;
4946
4947         if (CHIP_IS_E1(bp))
4948                 return;
4949         if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4950                 return;
4951
4952         switch (BP_ABS_FUNC(bp)) {
4953         case 0:
4954                 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4955                 break;
4956         case 1:
4957                 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4958                 break;
4959         case 2:
4960                 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4961                 break;
4962         case 3:
4963                 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4964                 break;
4965         case 4:
4966                 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4967                 break;
4968         case 5:
4969                 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4970                 break;
4971         case 6:
4972                 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4973                 break;
4974         case 7:
4975                 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4976                 break;
4977         default:
4978                 return;
4979         }
4980
4981         REG_WR(bp, offset, pretend_func_num);
4982         REG_RD(bp, offset);
4983         DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4984 }
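
/* Usage sketch (the pattern used elsewhere in this file): wrap accesses to
 * split registers in a pretend/un-pretend pair so the PXP routes them as
 * if they came from another function:
 *
 *	bnx2x_pretend_func(bp, fid);
 *	... access per-function registers as 'fid' ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * The REG_RD right after the write above presumably flushes the posted
 * write so the pretend takes effect before the next access.
 */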
4985
4986 static void bnx2x_pf_disable(struct bnx2x *bp)
4987 {
4988         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4989         val &= ~IGU_PF_CONF_FUNC_EN;
4990
4991         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4992         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4993         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4994 }
4995
4996 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4997 {
4998         u32 val, i;
4999
5000         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
5001
5002         bnx2x_reset_common(bp);
5003         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5004         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5005
5006         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5007         if (!CHIP_IS_E1(bp))
5008                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
5009
5010         if (CHIP_IS_E2(bp)) {
5011                 u8 fid;
5012
5013                 /*
5014                  * In 4-port or 2-port mode we need to turn off master-enable
5015                  * for everyone and then turn it back on for self.  So we
5016                  * disregard multi-function or not, and always disable it for
5017                  * all functions on the given path; this means 0,2,4,6 for
5018                  * path 0 and 1,3,5,7 for path 1
5019                  */
5020                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
5021                         if (fid == BP_ABS_FUNC(bp)) {
5022                                 REG_WR(bp,
5023                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
5024                                     1);
5025                                 continue;
5026                         }
5027
5028                         bnx2x_pretend_func(bp, fid);
5029                         /* clear pf enable */
5030                         bnx2x_pf_disable(bp);
5031                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5032                 }
5033         }
5034
5035         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5036         if (CHIP_IS_E1(bp)) {
5037                 /* enable HW interrupt from PXP on USDM overflow
5038                    bit 16 on INT_MASK_0 */
5039                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5040         }
5041
5042         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5043         bnx2x_init_pxp(bp);
5044
5045 #ifdef __BIG_ENDIAN
5046         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5047         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5048         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5049         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5050         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5051         /* make sure this value is 0 */
5052         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5053
5054 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5055         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5056         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5057         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5058         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5059 #endif
5060
5061         bnx2x_ilt_init_page_size(bp, INITOP_SET);
5062
5063         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5064                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5065
5066         /* let the HW do its magic ... */
5067         msleep(100);
5068         /* finish PXP init */
5069         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5070         if (val != 1) {
5071                 BNX2X_ERR("PXP2 CFG failed\n");
5072                 return -EBUSY;
5073         }
5074         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5075         if (val != 1) {
5076                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5077                 return -EBUSY;
5078         }
5079
5080         /* Timers bug workaround, E2 only. We need to set the entire ILT to
5081          * have entries with value "0" and valid bit on.
5082          * This needs to be done by the first PF that is loaded in a path
5083          * (i.e. common phase)
5084          */
5085         if (CHIP_IS_E2(bp)) {
5086                 struct ilt_client_info ilt_cli;
5087                 struct bnx2x_ilt ilt;
5088                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5089                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5090
5091                 /* initialize dummy TM client */
5092                 ilt_cli.start = 0;
5093                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5094                 ilt_cli.client_num = ILT_CLIENT_TM;
5095
5096                 /* Step 1: set zeroes to all ilt page entries with valid bit on
5097                  * Step 2: set the timers first/last ilt entry to point
5098                  * to the entire range to prevent ILT range error for 3rd/4th
5099                  * vnic (this code assumes existence of the vnic)
5100                  *
5101                  * both steps performed by call to bnx2x_ilt_client_init_op()
5102                  * with dummy TM client
5103                  *
5104                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5105                  * and its counterpart are split registers
5106                  */
5107                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5108                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5109                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5110
5111                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5112                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5113                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5114         }
5115
5116
5117         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5118         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5119
5120         if (CHIP_IS_E2(bp)) {
5121                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5122                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5123                 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5124
5125                 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5126
5127                 /* let the HW do its magic ... */
5128                 do {
5129                         msleep(200);
5130                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5131                 } while (factor-- && (val != 1));
5132
5133                 if (val != 1) {
5134                         BNX2X_ERR("ATC_INIT failed\n");
5135                         return -EBUSY;
5136                 }
5137         }
5138
5139         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5140
5141         /* clean the DMAE memory */
5142         bp->dmae_ready = 1;
5143         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5144
5145         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5146         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5147         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5148         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5149
5150         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5151         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5152         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5153         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5154
5155         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5156
5157         if (CHIP_MODE_IS_4_PORT(bp))
5158                 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5159
5160         /* QM queues pointers table */
5161         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5162
5163         /* soft reset pulse */
5164         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5165         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5166
5167 #ifdef BCM_CNIC
5168         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5169 #endif
5170
5171         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5172         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5173
5174         if (!CHIP_REV_IS_SLOW(bp)) {
5175                 /* enable hw interrupt from doorbell Q */
5176                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5177         }
5178
5179         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5180         if (CHIP_MODE_IS_4_PORT(bp)) {
5181                 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5182                 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5183         }
5184
5185         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5186         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5187 #ifndef BCM_CNIC
5188         /* set NIC mode */
5189         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5190 #endif
5191         if (!CHIP_IS_E1(bp))
5192                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5193
5194         if (CHIP_IS_E2(bp)) {
5195                 /* Bit-map indicating which L2 hdrs may appear after the
5196                    basic Ethernet header */
5197                 int has_ovlan = IS_MF_SD(bp);
5198                 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5199                 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5200         }
5201
5202         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5203         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5204         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5205         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5206
5207         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5208         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5209         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5210         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5211
5212         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5213         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5214         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5215         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5216
5217         if (CHIP_MODE_IS_4_PORT(bp))
5218                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5219
5220         /* sync semi rtc */
5221         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5222                0x80000000);
5223         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5224                0x80000000);
5225
5226         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5227         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5228         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5229
5230         if (CHIP_IS_E2(bp)) {
5231                 int has_ovlan = IS_MF_SD(bp);
5232                 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5233                 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5234         }
5235
5236         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5237         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5238                 REG_WR(bp, i, random32());
5239
5240         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5241 #ifdef BCM_CNIC
5242         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5243         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5244         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5245         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5246         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5247         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5248         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5249         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5250         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5251         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5252 #endif
5253         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5254
5255         if (sizeof(union cdu_context) != 1024)
5256                 /* we currently assume that a context is 1024 bytes */
5257                 dev_alert(&bp->pdev->dev, "please adjust the size "
5258                                           "of cdu_context(%ld)\n",
5259                          (long)sizeof(union cdu_context));
5260
5261         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5262         val = (4 << 24) + (0 << 12) + 1024;
5263         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5264
5265         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5266         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5267         /* enable context validation interrupt from CFC */
5268         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5269
5270         /* set the thresholds to prevent CFC/CDU race */
5271         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5272
5273         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5274
5275         if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5276                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5277
5278         bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5279         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5280
5281         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5282         /* Reset PCIE errors for debug */
5283         REG_WR(bp, 0x2814, 0xffffffff);
5284         REG_WR(bp, 0x3820, 0xffffffff);
5285
5286         if (CHIP_IS_E2(bp)) {
5287                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5288                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5289                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5290                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5291                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5292                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5293                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5294                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5295                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5296                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5297                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5298         }
5299
5300         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5301         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5302         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5303         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5304
5305         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5306         if (!CHIP_IS_E1(bp)) {
5307                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5308                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5309         }
5310         if (CHIP_IS_E2(bp)) {
5311                 /* Bit-map indicating which L2 hdrs may appear after the
5312                    basic Ethernet header */
5313                 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5314         }
5315
5316         if (CHIP_REV_IS_SLOW(bp))
5317                 msleep(200);
5318
5319         /* finish CFC init */
5320         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5321         if (val != 1) {
5322                 BNX2X_ERR("CFC LL_INIT failed\n");
5323                 return -EBUSY;
5324         }
5325         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5326         if (val != 1) {
5327                 BNX2X_ERR("CFC AC_INIT failed\n");
5328                 return -EBUSY;
5329         }
5330         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5331         if (val != 1) {
5332                 BNX2X_ERR("CFC CAM_INIT failed\n");
5333                 return -EBUSY;
5334         }
5335         REG_WR(bp, CFC_REG_DEBUG0, 0);
5336
5337         if (CHIP_IS_E1(bp)) {
5338                 /* read NIG statistic
5339                    to see if this is our first bring-up since power-up */
5340                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5341                 val = *bnx2x_sp(bp, wb_data[0]);
5342
5343                 /* do internal memory self test */
5344                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5345                         BNX2X_ERR("internal mem self test failed\n");
5346                         return -EBUSY;
5347                 }
5348         }
5349
5350         bnx2x_setup_fan_failure_detection(bp);
5351
5352         /* clear PXP2 attentions */
5353         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5354
5355         bnx2x_enable_blocks_attention(bp);
5356         if (CHIP_PARITY_ENABLED(bp))
5357                 bnx2x_enable_blocks_parity(bp);
5358
5359         if (!BP_NOMCP(bp)) {
5360                 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5361                 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5362                     CHIP_IS_E1x(bp)) {
5363                         u32 shmem_base[2], shmem2_base[2];
5364                         shmem_base[0] =  bp->common.shmem_base;
5365                         shmem2_base[0] = bp->common.shmem2_base;
5366                         if (CHIP_IS_E2(bp)) {
5367                                 shmem_base[1] =
5368                                         SHMEM2_RD(bp, other_shmem_base_addr);
5369                                 shmem2_base[1] =
5370                                         SHMEM2_RD(bp, other_shmem2_base_addr);
5371                         }
5372                         bnx2x_acquire_phy_lock(bp);
5373                         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5374                                               bp->common.chip_id);
5375                         bnx2x_release_phy_lock(bp);
5376                 }
5377         } else
5378                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5379
5380         return 0;
5381 }
5382
5383 static int bnx2x_init_hw_port(struct bnx2x *bp)
5384 {
5385         int port = BP_PORT(bp);
5386         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5387         u32 low, high;
5388         u32 val;
5389
5390         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
5391
5392         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5393
5394         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5395         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5396
5397         /* Timers bug workaround: the common phase disables the pf_master
5398          * bit in pglue; we need to enable it here before any DMAE access is
5399          * attempted. Therefore we manually added the enable-master to the
5400          * port phase (it also happens in the function phase)
5401          */
5402         if (CHIP_IS_E2(bp))
5403                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5404
5405         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5406         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5407         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5408         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5409
5410         /* QM cid (connection) count */
5411         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5412
5413 #ifdef BCM_CNIC
5414         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5415         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5416         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5417 #endif
5418
5419         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5420
5421         if (CHIP_MODE_IS_4_PORT(bp))
5422                 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5423
5424         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5425                 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5426                 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5427                         /* no pause for emulation and FPGA */
5428                         low = 0;
5429                         high = 513;
5430                 } else {
5431                         if (IS_MF(bp))
5432                                 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5433                         else if (bp->dev->mtu > 4096) {
5434                                 if (bp->flags & ONE_PORT_FLAG)
5435                                         low = 160;
5436                                 else {
5437                                         val = bp->dev->mtu;
5438                                         /* (24*1024 + val*4)/256 */
5439                                         low = 96 + (val/64) +
5440                                                         ((val % 64) ? 1 : 0);
5441                                 }
5442                         } else
5443                                 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5444                         high = low + 56;        /* 14*1024/256 */
5445                 }
5446                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5447                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5448         }
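
        /* Worked example for the non-MF jumbo branch above (illustrative):
         * with mtu == 9000 and ONE_PORT_FLAG clear, low = 96 + 9000/64 + 1
         * = 237, matching (24*1024 + 9000*4)/256 rounded up, and
         * high = 237 + 56 = 293, the extra 56 being 14*1024/256 blocks of
         * headroom.
         */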
5449
5450         if (CHIP_MODE_IS_4_PORT(bp)) {
5451                 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5452                 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5453                 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5454                                           BRB1_REG_MAC_GUARANTIED_0), 40);
5455         }
5456
5457         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5458
5459         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5460         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5461         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5462         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5463
5464         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5465         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5466         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5467         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5468         if (CHIP_MODE_IS_4_PORT(bp))
5469                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5470
5471         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5472         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5473
5474         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5475
5476         if (!CHIP_IS_E2(bp)) {
5477                 /* configure PBF to work without PAUSE mtu 9000 */
5478                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5479
5480                 /* update threshold */
5481                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5482                 /* update init credit */
5483                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5484
5485                 /* probe changes */
5486                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5487                 udelay(50);
5488                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5489         }
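
        /* Worked numbers for the PBF setup above (from the constants used):
         * 9040/16 == 565, so the arbiter threshold is 565 16-byte units
         * (one 9000-byte jumbo frame plus headroom), and the initial credit
         * is 565 + 553 - 22 == 1096 units.
         */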
5490
5491 #ifdef BCM_CNIC
5492         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5493 #endif
5494         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5495         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5496
5497         if (CHIP_IS_E1(bp)) {
5498                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5499                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5500         }
5501         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5502
5503         bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5504
5505         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5506         /* init aeu_mask_attn_func_0/1:
5507          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5508          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5509          *             bits 4-7 are used for "per vn group attention" */
5510         val = IS_MF(bp) ? 0xF7 : 0x7;
5511         /* Enable DCBX attention for all but E1 */
5512         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5513         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
5514
5515         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5516         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5517         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5518         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5519         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5520
5521         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5522
5523         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5524
5525         if (!CHIP_IS_E1(bp)) {
5526                 /* 0x2 disable mf_ov, 0x1 enable */
5527                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5528                        (IS_MF_SD(bp) ? 0x1 : 0x2));
5529
5530                 if (CHIP_IS_E2(bp)) {
5531                         val = 0;
5532                         switch (bp->mf_mode) {
5533                         case MULTI_FUNCTION_SD:
5534                                 val = 1;
5535                                 break;
5536                         case MULTI_FUNCTION_SI:
5537                                 val = 2;
5538                                 break;
5539                         }
5540
5541                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5542                                                   NIG_REG_LLH0_CLS_TYPE), val);
5543                 }
		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5549         }
5550
5551         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5552         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5553         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5554                                       bp->common.shmem2_base, port)) {
5555                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5556                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5557                 val = REG_RD(bp, reg_addr);
5558                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5559                 REG_WR(bp, reg_addr, val);
5560         }
5561         bnx2x__link_reset(bp);
5562
5563         return 0;
5564 }
5565
5566 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5567 {
5568         int reg;
5569
5570         if (CHIP_IS_E1(bp))
5571                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5572         else
5573                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5574
5575         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5576 }
5577
5578 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5579 {
5580         bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5581 }
5582
5583 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5584 {
5585         u32 i, base = FUNC_ILT_BASE(func);
5586         for (i = base; i < base + ILT_PER_FUNC; i++)
5587                 bnx2x_ilt_wr(bp, i, 0);
5588 }
5589
5590 static int bnx2x_init_hw_func(struct bnx2x *bp)
5591 {
5592         int port = BP_PORT(bp);
5593         int func = BP_FUNC(bp);
5594         struct bnx2x_ilt *ilt = BP_ILT(bp);
5595         u16 cdu_ilt_start;
5596         u32 addr, val;
5597         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5598         int i, main_mem_width;
5599
5600         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
5601
5602         /* set MSI reconfigure capability */
5603         if (bp->common.int_block == INT_BLOCK_HC) {
5604                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5605                 val = REG_RD(bp, addr);
5606                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5607                 REG_WR(bp, addr, val);
5608         }
5609
5610         ilt = BP_ILT(bp);
5611         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5612
5613         for (i = 0; i < L2_ILT_LINES(bp); i++) {
5614                 ilt->lines[cdu_ilt_start + i].page =
5615                         bp->context.vcxt + (ILT_PAGE_CIDS * i);
5616                 ilt->lines[cdu_ilt_start + i].page_mapping =
5617                         bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need
		 * to set the size
		 */
5620         }
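
	/*
	 * Illustrative mapping: line cdu_ilt_start + i points at the
	 * i-th CDU_ILT_PAGE_SZ slice of the context memory, so line 0
	 * serves CIDs 0..ILT_PAGE_CIDS-1, line 1 the next range, etc.
	 */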
5621         bnx2x_ilt_init_op(bp, INITOP_SET);
5622
5623 #ifdef BCM_CNIC
5624         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5625
5626         /* T1 hash bits value determines the T1 number of entries */
5627         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5628 #endif
5629
5630 #ifndef BCM_CNIC
5631         /* set NIC mode */
5632         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5633 #endif  /* BCM_CNIC */
5634
5635         if (CHIP_IS_E2(bp)) {
5636                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5637
5638                 /* Turn on a single ISR mode in IGU if driver is going to use
5639                  * INT#x or MSI
5640                  */
5641                 if (!(bp->flags & USING_MSIX_FLAG))
5642                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers bug workaround: function init part.
		 * Wait 20msec after initializing the ILT to make sure
		 * there are no requests left in any of the PXP internal
		 * queues that still use "old" ILT addresses.
		 */
5649                 msleep(20);
		/*
		 * Master enable - re-enabled here because WB DMAE writes
		 * are performed before this register is re-initialized as
		 * part of the regular function init.
		 */
5655                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5656                 /* Enable the function in IGU */
5657                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5658         }
5659
5660         bp->dmae_ready = 1;
5661
5662         bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5663
5664         if (CHIP_IS_E2(bp))
5665                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5666
5667         bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5668         bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5669         bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5670         bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5671         bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5672         bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5673         bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5674         bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5675         bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5676
5677         if (CHIP_IS_E2(bp)) {
5678                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5679                                                                 BP_PATH(bp));
5680                 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5681                                                                 BP_PATH(bp));
5682         }
5683
5684         if (CHIP_MODE_IS_4_PORT(bp))
5685                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5686
5687         if (CHIP_IS_E2(bp))
5688                 REG_WR(bp, QM_REG_PF_EN, 1);
5689
5690         bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5691
5692         if (CHIP_MODE_IS_4_PORT(bp))
5693                 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5694
5695         bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5696         bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5697         bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5698         bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5699         bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5700         bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5701         bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5702         bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5703         bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5704         bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5705         bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5706         if (CHIP_IS_E2(bp))
5707                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5708
5709         bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5710
5711         bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5712
5713         if (CHIP_IS_E2(bp))
5714                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5715
5716         if (IS_MF(bp)) {
5717                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5718                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5719         }
5720
5721         bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5722
5723         /* HC init per function */
5724         if (bp->common.int_block == INT_BLOCK_HC) {
5725                 if (CHIP_IS_E1H(bp)) {
5726                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5727
5728                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5729                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5730                 }
5731                 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5732
5733         } else {
5734                 int num_segs, sb_idx, prod_offset;
5735
5736                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5737
5738                 if (CHIP_IS_E2(bp)) {
5739                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5740                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5741                 }
5742
5743                 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5744
5745                 if (CHIP_IS_E2(bp)) {
5746                         int dsb_idx = 0;
			/*
			 * Producer memory:
			 * E2 mode: addresses 0-135 match the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode (backward compatible mode): for a
			 * non-default SB, each even line in the memory holds
			 * the U producer and each odd line holds the C
			 * producer. The first 128 producers are for NDSBs
			 * (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSBs of the PFs.
			 * Each PF has five segments (the order inside each
			 * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods.
			 */
5767                         /* non-default-status-blocks */
5768                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5769                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5770                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5771                                 prod_offset = (bp->igu_base_sb + sb_idx) *
5772                                         num_segs;
5773
5774                                 for (i = 0; i < num_segs; i++) {
5775                                         addr = IGU_REG_PROD_CONS_MEMORY +
5776                                                         (prod_offset + i) * 4;
5777                                         REG_WR(bp, addr, 0);
5778                                 }
5779                                 /* send consumer update with value 0 */
5780                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5781                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5782                                 bnx2x_igu_clear_sb(bp,
5783                                                    bp->igu_base_sb + sb_idx);
5784                         }
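
			/*
			 * Example with illustrative values: if num_segs
			 * were 2 and igu_base_sb were 8, SB 8's producers
			 * occupy words 16-17 of IGU_REG_PROD_CONS_MEMORY,
			 * SB 9's words 18-19, and so on; each word is
			 * zeroed above.
			 */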
5785
5786                         /* default-status-blocks */
5787                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5788                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5789
5790                         if (CHIP_MODE_IS_4_PORT(bp))
5791                                 dsb_idx = BP_FUNC(bp);
5792                         else
5793                                 dsb_idx = BP_E1HVN(bp);
5794
5795                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5796                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
5797                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
5798
5799                         for (i = 0; i < (num_segs * E1HVN_MAX);
5800                              i += E1HVN_MAX) {
5801                                 addr = IGU_REG_PROD_CONS_MEMORY +
5802                                                         (prod_offset + i)*4;
5803                                 REG_WR(bp, addr, 0);
5804                         }
5805                         /* send consumer update with 0 */
5806                         if (CHIP_INT_MODE_IS_BC(bp)) {
5807                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5808                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5809                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5810                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
5811                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5812                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
5813                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5814                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
5815                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5816                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5817                         } else {
5818                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5819                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5820                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5821                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5822                         }
5823                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5824
5825                         /* !!! these should become driver const once
5826                            rf-tool supports split-68 const */
5827                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5828                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5829                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5830                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5831                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5832                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5833                 }
5834         }
5835
5836         /* Reset PCIE errors for debug */
5837         REG_WR(bp, 0x2114, 0xffffffff);
5838         REG_WR(bp, 0x2120, 0xffffffff);
5839
5840         bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5841         bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5842         bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5843         bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5844         bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5845         bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5846
5847         if (CHIP_IS_E1x(bp)) {
5848                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5849                 main_mem_base = HC_REG_MAIN_MEMORY +
5850                                 BP_PORT(bp) * (main_mem_size * 4);
5851                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5852                 main_mem_width = 8;
5853
5854                 val = REG_RD(bp, main_mem_prty_clr);
5855                 if (val)
5856                         DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5857                                           "block during "
5858                                           "function init (0x%x)!\n", val);
5859
5860                 /* Clear "false" parity errors in MSI-X table */
5861                 for (i = main_mem_base;
5862                      i < main_mem_base + main_mem_size * 4;
5863                      i += main_mem_width) {
5864                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
5865                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5866                                          i, main_mem_width / 4);
5867                 }
5868                 /* Clear HC parity attention */
5869                 REG_RD(bp, main_mem_prty_clr);
5870         }
5871
5872         bnx2x_phy_probe(&bp->link_params);
5873
5874         return 0;
5875 }
5876
5877 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5878 {
5879         int rc = 0;
5880
5881         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5882            BP_ABS_FUNC(bp), load_code);
5883
5884         bp->dmae_ready = 0;
5885         spin_lock_init(&bp->dmae_lock);
5886         rc = bnx2x_gunzip_init(bp);
5887         if (rc)
5888                 return rc;
5889
5890         switch (load_code) {
5891         case FW_MSG_CODE_DRV_LOAD_COMMON:
5892         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5893                 rc = bnx2x_init_hw_common(bp, load_code);
5894                 if (rc)
5895                         goto init_hw_err;
5896                 /* no break */
5897
5898         case FW_MSG_CODE_DRV_LOAD_PORT:
5899                 rc = bnx2x_init_hw_port(bp);
5900                 if (rc)
5901                         goto init_hw_err;
5902                 /* no break */
5903
5904         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5905                 rc = bnx2x_init_hw_func(bp);
5906                 if (rc)
5907                         goto init_hw_err;
5908                 break;
5909
5910         default:
5911                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5912                 break;
5913         }
5914
5915         if (!BP_NOMCP(bp)) {
5916                 int mb_idx = BP_FW_MB_IDX(bp);
5917
5918                 bp->fw_drv_pulse_wr_seq =
5919                                 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5920                                  DRV_PULSE_SEQ_MASK);
5921                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5922         }
5923
5924 init_hw_err:
5925         bnx2x_gunzip_end(bp);
5926
5927         return rc;
5928 }
5929
5930 void bnx2x_free_mem(struct bnx2x *bp)
5931 {
5933 #define BNX2X_PCI_FREE(x, y, size) \
5934         do { \
5935                 if (x) { \
5936                         dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5937                         x = NULL; \
5938                         y = 0; \
5939                 } \
5940         } while (0)
5941
5942 #define BNX2X_FREE(x) \
5943         do { \
5944                 if (x) { \
5945                         kfree((void *)x); \
5946                         x = NULL; \
5947                 } \
5948         } while (0)
5949
5950         int i;
5951
5952         /* fastpath */
5953         /* Common */
5954         for_each_queue(bp, i) {
5955 #ifdef BCM_CNIC
5956                 /* FCoE client uses default status block */
5957                 if (IS_FCOE_IDX(i)) {
5958                         union host_hc_status_block *sb =
5959                                 &bnx2x_fp(bp, i, status_blk);
5960                         memset(sb, 0, sizeof(union host_hc_status_block));
5961                         bnx2x_fp(bp, i, status_blk_mapping) = 0;
5962                 } else {
5963 #endif
5964                 /* status blocks */
5965                 if (CHIP_IS_E2(bp))
5966                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5967                                        bnx2x_fp(bp, i, status_blk_mapping),
5968                                        sizeof(struct host_hc_status_block_e2));
5969                 else
5970                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5971                                        bnx2x_fp(bp, i, status_blk_mapping),
5972                                        sizeof(struct host_hc_status_block_e1x));
5973 #ifdef BCM_CNIC
5974                 }
5975 #endif
5976         }
5977         /* Rx */
5978         for_each_rx_queue(bp, i) {
5979
5980                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5981                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5982                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5983                                bnx2x_fp(bp, i, rx_desc_mapping),
5984                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5985
5986                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5987                                bnx2x_fp(bp, i, rx_comp_mapping),
5988                                sizeof(struct eth_fast_path_rx_cqe) *
5989                                NUM_RCQ_BD);
5990
5991                 /* SGE ring */
5992                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5993                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5994                                bnx2x_fp(bp, i, rx_sge_mapping),
5995                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5996         }
5997         /* Tx */
5998         for_each_tx_queue(bp, i) {
5999
6000                 /* fastpath tx rings: tx_buf tx_desc */
6001                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6002                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6003                                bnx2x_fp(bp, i, tx_desc_mapping),
6004                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6005         }
6006         /* end of fastpath */
6007
6008         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6009                        sizeof(struct host_sp_status_block));
6010
6011         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6012                        sizeof(struct bnx2x_slowpath));
6013
6014         BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
6015                        bp->context.size);
6016
6017         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
6018
6019         BNX2X_FREE(bp->ilt->lines);
6020
6021 #ifdef BCM_CNIC
6022         if (CHIP_IS_E2(bp))
6023                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
6024                                sizeof(struct host_hc_status_block_e2));
6025         else
6026                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
6027                                sizeof(struct host_hc_status_block_e1x));
6028
6029         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
6030 #endif
6031
6032         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6033
6034         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6035                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
6036
6037         BNX2X_FREE(bp->rx_indir_table);
6038
6039 #undef BNX2X_PCI_FREE
#undef BNX2X_FREE
6041 }
6042
6043 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6044 {
6045         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6046         if (CHIP_IS_E2(bp)) {
6047                 bnx2x_fp(bp, index, sb_index_values) =
6048                         (__le16 *)status_blk.e2_sb->sb.index_values;
6049                 bnx2x_fp(bp, index, sb_running_index) =
6050                         (__le16 *)status_blk.e2_sb->sb.running_index;
6051         } else {
6052                 bnx2x_fp(bp, index, sb_index_values) =
6053                         (__le16 *)status_blk.e1x_sb->sb.index_values;
6054                 bnx2x_fp(bp, index, sb_running_index) =
6055                         (__le16 *)status_blk.e1x_sb->sb.running_index;
6056         }
6057 }
6058
6059 int bnx2x_alloc_mem(struct bnx2x *bp)
6060 {
6061 #define BNX2X_PCI_ALLOC(x, y, size) \
6062         do { \
6063                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6064                 if (x == NULL) \
6065                         goto alloc_mem_err; \
6066                 memset(x, 0, size); \
6067         } while (0)
6068
6069 #define BNX2X_ALLOC(x, size) \
6070         do { \
6071                 x = kzalloc(size, GFP_KERNEL); \
6072                 if (x == NULL) \
6073                         goto alloc_mem_err; \
6074         } while (0)
6075
6076         int i;
6077
6078         /* fastpath */
6079         /* Common */
6080         for_each_queue(bp, i) {
6081                 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
6082                 bnx2x_fp(bp, i, bp) = bp;
6083                 /* status blocks */
6084 #ifdef BCM_CNIC
6085                 if (!IS_FCOE_IDX(i)) {
6086 #endif
6087                         if (CHIP_IS_E2(bp))
6088                                 BNX2X_PCI_ALLOC(sb->e2_sb,
6089                                     &bnx2x_fp(bp, i, status_blk_mapping),
6090                                     sizeof(struct host_hc_status_block_e2));
6091                         else
6092                                 BNX2X_PCI_ALLOC(sb->e1x_sb,
6093                                     &bnx2x_fp(bp, i, status_blk_mapping),
6094                                     sizeof(struct host_hc_status_block_e1x));
6095 #ifdef BCM_CNIC
6096                 }
6097 #endif
6098                 set_sb_shortcuts(bp, i);
6099         }
6100         /* Rx */
6101         for_each_queue(bp, i) {
6102
6103                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6104                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6105                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6106                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6107                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6108                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6109
6110                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6111                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6112                                 sizeof(struct eth_fast_path_rx_cqe) *
6113                                 NUM_RCQ_BD);
6114
6115                 /* SGE ring */
6116                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6117                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6118                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6119                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6120                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6121         }
6122         /* Tx */
6123         for_each_queue(bp, i) {
6124
6125                 /* fastpath tx rings: tx_buf tx_desc */
6126                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6127                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6128                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6129                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6130                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6131         }
6132         /* end of fastpath */
6133
6134 #ifdef BCM_CNIC
6135         if (CHIP_IS_E2(bp))
6136                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6137                                 sizeof(struct host_hc_status_block_e2));
6138         else
6139                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6140                                 sizeof(struct host_hc_status_block_e1x));
6141
6142         /* allocate searcher T2 table */
6143         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6144 #endif
6145
6147         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6148                         sizeof(struct host_sp_status_block));
6149
6150         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6151                         sizeof(struct bnx2x_slowpath));
6152
6153         bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
6154
6155         BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6156                         bp->context.size);
6157
6158         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
6159
6160         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6161                 goto alloc_mem_err;
6162
6163         /* Slow path ring */
6164         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6165
6166         /* EQ */
6167         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6168                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
6169
6170         BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
6171                     TSTORM_INDIRECTION_TABLE_SIZE);
6172         return 0;
6173
6174 alloc_mem_err:
6175         bnx2x_free_mem(bp);
6176         return -ENOMEM;
6177
6178 #undef BNX2X_PCI_ALLOC
6179 #undef BNX2X_ALLOC
6180 }
6181
6182 /*
6183  * Init service functions
6184  */
6185 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6186                              int *state_p, int flags);
6187
6188 int bnx2x_func_start(struct bnx2x *bp)
6189 {
6190         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6191
6192         /* Wait for completion */
6193         return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6194                                  WAIT_RAMROD_COMMON);
6195 }
6196
6197 static int bnx2x_func_stop(struct bnx2x *bp)
6198 {
6199         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6200
6201         /* Wait for completion */
6202         return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6203                                       0, &(bp->state), WAIT_RAMROD_COMMON);
6204 }
6205
6206 /**
6207  * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6208  *
6209  * @param bp driver descriptor
6210  * @param set set or clear an entry (1 or 0)
6211  * @param mac pointer to a buffer containing a MAC
6212  * @param cl_bit_vec bit vector of clients to register a MAC for
6213  * @param cam_offset offset in a CAM to use
6214  * @param is_bcast is the set MAC a broadcast address (for E1 only)
6215  */
6216 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6217                                    u32 cl_bit_vec, u8 cam_offset,
6218                                    u8 is_bcast)
6219 {
6220         struct mac_configuration_cmd *config =
6221                 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6222         int ramrod_flags = WAIT_RAMROD_COMMON;
6223
6224         bp->set_mac_pending = 1;
6225
6226         config->hdr.length = 1;
6227         config->hdr.offset = cam_offset;
6228         config->hdr.client_id = 0xff;
6229         /* Mark the single MAC configuration ramrod as opposed to a
6230          * UC/MC list configuration).
6231          */
6232         config->hdr.echo = 1;
6233
6234         /* primary MAC */
6235         config->config_table[0].msb_mac_addr =
6236                                         swab16(*(u16 *)&mac[0]);
6237         config->config_table[0].middle_mac_addr =
6238                                         swab16(*(u16 *)&mac[2]);
6239         config->config_table[0].lsb_mac_addr =
6240                                         swab16(*(u16 *)&mac[4]);
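	/*
	 * Packing example (illustrative MAC 00:11:22:33:44:55 on a
	 * little-endian host): *(u16 *)&mac[0] reads 0x1100 and
	 * swab16() yields 0x0011, so the fields become msb 0x0011,
	 * middle 0x2233 and lsb 0x4455.
	 */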
6241         config->config_table[0].clients_bit_vector =
6242                                         cpu_to_le32(cl_bit_vec);
6243         config->config_table[0].vlan_id = 0;
6244         config->config_table[0].pf_id = BP_FUNC(bp);
6245         if (set)
6246                 SET_FLAG(config->config_table[0].flags,
6247                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6248                         T_ETH_MAC_COMMAND_SET);
6249         else
6250                 SET_FLAG(config->config_table[0].flags,
6251                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6252                         T_ETH_MAC_COMMAND_INVALIDATE);
6253
6254         if (is_bcast)
6255                 SET_FLAG(config->config_table[0].flags,
6256                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6257
6258         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
6259            (set ? "setting" : "clearing"),
6260            config->config_table[0].msb_mac_addr,
6261            config->config_table[0].middle_mac_addr,
6262            config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6263
6264         mb();
6265
6266         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6267                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6268                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6269
6270         /* Wait for a completion */
6271         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6272 }
6273
6274 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6275                              int *state_p, int flags)
6276 {
6277         /* can take a while if any port is running */
6278         int cnt = 5000;
6279         u8 poll = flags & WAIT_RAMROD_POLL;
6280         u8 common = flags & WAIT_RAMROD_COMMON;
6281
6282         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6283            poll ? "polling" : "waiting", state, idx);
6284
6285         might_sleep();
6286         while (cnt--) {
6287                 if (poll) {
6288                         if (common)
6289                                 bnx2x_eq_int(bp);
6290                         else {
6291                                 bnx2x_rx_int(bp->fp, 10);
6292                                 /* if index is different from 0
6293                                  * the reply for some commands will
6294                                  * be on the non default queue
6295                                  */
6296                                 if (idx)
6297                                         bnx2x_rx_int(&bp->fp[idx], 10);
6298                         }
6299                 }
6300
6301                 mb(); /* state is changed by bnx2x_sp_event() */
6302                 if (*state_p == state) {
6303 #ifdef BNX2X_STOP_ON_ERROR
6304                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6305 #endif
6306                         return 0;
6307                 }
6308
6309                 msleep(1);
6310
6311                 if (bp->panic)
6312                         return -EIO;
6313         }
6314
6315         /* timeout! */
6316         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6317                   poll ? "polling" : "waiting", state, idx);
6318 #ifdef BNX2X_STOP_ON_ERROR
6319         bnx2x_panic();
6320 #endif
6321
6322         return -EBUSY;
6323 }
6324
6325 static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6326 {
6327         if (CHIP_IS_E1H(bp))
6328                 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6329         else if (CHIP_MODE_IS_4_PORT(bp))
6330                 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
6331         else
6332                 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
6333 }
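
/*
 * Illustrative example: with an (assumed) E1H_FUNC_MAX of 8, function 3
 * asking for relative line 1 gets CAM offset 8 * 1 + 3 = 11; each
 * relative line is thus a stripe of one entry per function.
 */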
6334
6335 /**
6336  *  LLH CAM line allocations: currently only iSCSI and ETH macs are
6337  *  relevant. In addition, current implementation is tuned for a
6338  *  single ETH MAC.
6339  */
6340 enum {
6341         LLH_CAM_ISCSI_ETH_LINE = 0,
6342         LLH_CAM_ETH_LINE,
6343         LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6344 };
6345
6346 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6347                           int set,
6348                           unsigned char *dev_addr,
6349                           int index)
6350 {
6351         u32 wb_data[2];
6352         u32 mem_offset, ena_offset, mem_index;
	/*
	 * indexes mapping:
	 * 0..7 - goes to MEM
	 * 8..15 - goes to MEM2
	 */
6358
6359         if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6360                 return;
6361
6362         /* calculate memory start offset according to the mapping
6363          * and index in the memory */
6364         if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6365                 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6366                                            NIG_REG_LLH0_FUNC_MEM;
6367                 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6368                                            NIG_REG_LLH0_FUNC_MEM_ENABLE;
6369                 mem_index = index;
6370         } else {
6371                 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6372                                            NIG_REG_P0_LLH_FUNC_MEM2;
6373                 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6374                                            NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6375                 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6376         }
6377
6378         if (set) {
6379                 /* LLH_FUNC_MEM is a u64 WB register */
6380                 mem_offset += 8*mem_index;
6381
6382                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6383                               (dev_addr[4] <<  8) |  dev_addr[5]);
6384                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
6385
6386                 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6387         }
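
	/*
	 * Layout example (illustrative MAC 00:11:22:33:44:55):
	 * wb_data[0] = 0x22334455 holds the low four bytes and
	 * wb_data[1] = 0x00000011 the high two, matching the u64
	 * wide-bus register format.
	 */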
6388
6389         /* enable/disable the entry */
6390         REG_WR(bp, ena_offset + 4*mem_index, set);
}
6393
6394 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6395 {
6396         u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6397                          bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6398
6399         /* networking  MAC */
6400         bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);
6402
6403         bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6404
6405         if (CHIP_IS_E1(bp)) {
6406                 /* broadcast MAC */
6407                 static const u8 bcast[ETH_ALEN] = {
6408                         0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6409                 };
6410                 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6411         }
6412 }
6413
6414 static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6415 {
6416         return CHIP_REV_IS_SLOW(bp) ?
6417                 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6418                 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6419 }
6420
6421 /* set mc list, do not wait as wait implies sleep and
6422  * set_rx_mode can be invoked from non-sleepable context.
6423  *
6424  * Instead we use the same ramrod data buffer each time we need
6425  * to configure a list of addresses, and use the fact that the
6426  * list of MACs is changed in an incremental way and that the
6427  * function is called under the netif_addr_lock. A temporary
6428  * inconsistent CAM configuration (possible in case of a very fast
6429  * sequence of add/del/add on the host side) will shortly be
6430  * restored by the handler of the last ramrod.
6431  */
6432 static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6433 {
6434         int i = 0, old;
6435         struct net_device *dev = bp->dev;
6436         u8 offset = bnx2x_e1_cam_mc_offset(bp);
6437         struct netdev_hw_addr *ha;
6438         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6439         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6440
6441         if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6442                 return -EINVAL;
6443
6444         netdev_for_each_mc_addr(ha, dev) {
6445                 /* copy mac */
6446                 config_cmd->config_table[i].msb_mac_addr =
6447                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6448                 config_cmd->config_table[i].middle_mac_addr =
6449                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6450                 config_cmd->config_table[i].lsb_mac_addr =
6451                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6452
6453                 config_cmd->config_table[i].vlan_id = 0;
6454                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6455                 config_cmd->config_table[i].clients_bit_vector =
6456                         cpu_to_le32(1 << BP_L_ID(bp));
6457
6458                 SET_FLAG(config_cmd->config_table[i].flags,
6459                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6460                         T_ETH_MAC_COMMAND_SET);
6461
6462                 DP(NETIF_MSG_IFUP,
6463                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6464                    config_cmd->config_table[i].msb_mac_addr,
6465                    config_cmd->config_table[i].middle_mac_addr,
6466                    config_cmd->config_table[i].lsb_mac_addr);
6467                 i++;
6468         }
6469         old = config_cmd->hdr.length;
6470         if (old > i) {
6471                 for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->config_table[i])) {
6474                                 /* already invalidated */
6475                                 break;
6476                         }
6477                         /* invalidate */
6478                         SET_FLAG(config_cmd->config_table[i].flags,
6479                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6480                                 T_ETH_MAC_COMMAND_INVALIDATE);
6481                 }
6482         }
6483
6484         wmb();
6485
6486         config_cmd->hdr.length = i;
6487         config_cmd->hdr.offset = offset;
6488         config_cmd->hdr.client_id = 0xff;
6489         /* Mark that this ramrod doesn't use bp->set_mac_pending for
6490          * synchronization.
6491          */
6492         config_cmd->hdr.echo = 0;
6493
6494         mb();
6495
6496         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6497                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6498 }
6499
6500 void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6501 {
6502         int i;
6503         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6504         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6505         int ramrod_flags = WAIT_RAMROD_COMMON;
6506         u8 offset = bnx2x_e1_cam_mc_offset(bp);
6507
6508         for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6509                 SET_FLAG(config_cmd->config_table[i].flags,
6510                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6511                         T_ETH_MAC_COMMAND_INVALIDATE);
6512
6513         wmb();
6514
6515         config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6516         config_cmd->hdr.offset = offset;
6517         config_cmd->hdr.client_id = 0xff;
6518         /* We'll wait for a completion this time... */
6519         config_cmd->hdr.echo = 1;
6520
6521         bp->set_mac_pending = 1;
6522
6523         mb();
6524
6525         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6526                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6527
6528         /* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6533
6534 /* Accept one or more multicasts */
6535 static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6536 {
6537         struct net_device *dev = bp->dev;
6538         struct netdev_hw_addr *ha;
6539         u32 mc_filter[MC_HASH_SIZE];
6540         u32 crc, bit, regidx;
6541         int i;
6542
6543         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6544
6545         netdev_for_each_mc_addr(ha, dev) {
6546                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6547                    bnx2x_mc_addr(ha));
6548
6549                 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6550                                 ETH_ALEN);
6551                 bit = (crc >> 24) & 0xff;
6552                 regidx = bit >> 5;
6553                 bit &= 0x1f;
6554                 mc_filter[regidx] |= (1 << bit);
6555         }
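
	/*
	 * Hash example: if crc32c_le() of a MAC were 0xab123456, the
	 * top byte 0xab (171) selects register 171 >> 5 = 5 and bit
	 * 171 & 0x1f = 11 within it, so mc_filter[5] gets bit 11 set
	 * (illustrative CRC value).
	 */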
6556
6557         for (i = 0; i < MC_HASH_SIZE; i++)
6558                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6559                        mc_filter[i]);
6560
6561         return 0;
6562 }
6563
6564 void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6565 {
6566         int i;
6567
6568         for (i = 0; i < MC_HASH_SIZE; i++)
6569                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6570 }
6571
6572 #ifdef BCM_CNIC
6573 /**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
6582  */
6583 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6584 {
6585         u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6586                          bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6587         u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6588                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6589         u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6590         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
6591
6592         /* Send a SET_MAC ramrod */
6593         bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
6594                                cam_offset, 0);
6595
6596         bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6597
6598         return 0;
6599 }
6600
6601 /**
 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
 * ETH MAC(s). This function will wait until the ramrod
 * completion returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
6610  */
6611 int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6612 {
6613         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
	/*
6615          * CAM allocation for E1H
6616          * eth unicasts: by func number
6617          * iscsi: by func number
6618          * fip unicast: by func number
6619          * fip multicast: by func number
6620          */
6621         bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6622                 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6623
6624         return 0;
6625 }
6626
6627 int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6628 {
6629         u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6630
	/*
6632          * CAM allocation for E1H
6633          * eth unicasts: by func number
6634          * iscsi: by func number
6635          * fip unicast: by func number
6636          * fip multicast: by func number
6637          */
6638         bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6639                 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6640
6641         return 0;
6642 }
6643 #endif
6644
6645 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6646                                     struct bnx2x_client_init_params *params,
6647                                     u8 activate,
6648                                     struct client_init_ramrod_data *data)
6649 {
6650         /* Clear the buffer */
6651         memset(data, 0, sizeof(*data));
6652
6653         /* general */
6654         data->general.client_id = params->rxq_params.cl_id;
6655         data->general.statistics_counter_id = params->rxq_params.stat_id;
6656         data->general.statistics_en_flg =
6657                 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6658         data->general.is_fcoe_flg =
6659                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
6660         data->general.activate_flg = activate;
6661         data->general.sp_client_id = params->rxq_params.spcl_id;
6662
6663         /* Rx data */
6664         data->rx.tpa_en_flg =
6665                 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6666         data->rx.vmqueue_mode_en_flg = 0;
6667         data->rx.cache_line_alignment_log_size =
6668                 params->rxq_params.cache_line_log;
6669         data->rx.enable_dynamic_hc =
6670                 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6671         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6672         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6673         data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6674
6675         /* We don't set drop flags */
6676         data->rx.drop_ip_cs_err_flg = 0;
6677         data->rx.drop_tcp_cs_err_flg = 0;
6678         data->rx.drop_ttl0_flg = 0;
6679         data->rx.drop_udp_cs_err_flg = 0;
6680
6681         data->rx.inner_vlan_removal_enable_flg =
6682                 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6683         data->rx.outer_vlan_removal_enable_flg =
6684                 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6685         data->rx.status_block_id = params->rxq_params.fw_sb_id;
6686         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6687         data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6688         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6689         data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6690         data->rx.bd_page_base.lo =
6691                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6692         data->rx.bd_page_base.hi =
6693                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6694         data->rx.sge_page_base.lo =
6695                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6696         data->rx.sge_page_base.hi =
6697                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6698         data->rx.cqe_page_base.lo =
6699                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6700         data->rx.cqe_page_base.hi =
6701                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6702         data->rx.is_leading_rss =
6703                 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6704         data->rx.is_approx_mcast = data->rx.is_leading_rss;
6705
6706         /* Tx data */
6707         data->tx.enforce_security_flg = 0; /* VF specific */
6708         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6709         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6710         data->tx.mtu = 0; /* VF specific */
6711         data->tx.tx_bd_page_base.lo =
6712                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6713         data->tx.tx_bd_page_base.hi =
6714                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6715
6716         /* flow control data */
6717         data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6718         data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6719         data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6720         data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6721         data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6722         data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6723         data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6724
6725         data->fc.safc_group_num = params->txq_params.cos;
6726         data->fc.safc_group_en_flg =
6727                 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6728         data->fc.traffic_type =
6729                 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6730                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
6731 }
6732
6733 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6734 {
6735         /* ustorm cxt validation */
6736         cxt->ustorm_ag_context.cdu_usage =
6737                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6738                                        ETH_CONNECTION_TYPE);
6739         /* xcontext validation */
6740         cxt->xstorm_ag_context.cdu_reserved =
6741                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6742                                        ETH_CONNECTION_TYPE);
6743 }
6744
6745 static int bnx2x_setup_fw_client(struct bnx2x *bp,
6746                                  struct bnx2x_client_init_params *params,
6747                                  u8 activate,
6748                                  struct client_init_ramrod_data *data,
6749                                  dma_addr_t data_mapping)
6750 {
6751         u16 hc_usec;
6752         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6753         int ramrod_flags = 0, rc;
6754
6755         /* HC and context validation values */
6756         hc_usec = params->txq_params.hc_rate ?
6757                 1000000 / params->txq_params.hc_rate : 0;
6758         bnx2x_update_coalesce_sb_index(bp,
6759                         params->txq_params.fw_sb_id,
6760                         params->txq_params.sb_cq_index,
6761                         !(params->txq_params.flags & QUEUE_FLG_HC),
6762                         hc_usec);
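
	/*
	 * hc_rate is an interrupt rate in events/sec; 1000000/hc_rate
	 * converts it to a coalescing period in usec here and for the
	 * Rx queue below (e.g. an illustrative 50000 ints/sec becomes
	 * 20 usec; a zero rate maps to hc_usec 0).
	 */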
6763
6764         *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6765
6766         hc_usec = params->rxq_params.hc_rate ?
6767                 1000000 / params->rxq_params.hc_rate : 0;
6768         bnx2x_update_coalesce_sb_index(bp,
6769                         params->rxq_params.fw_sb_id,
6770                         params->rxq_params.sb_cq_index,
6771                         !(params->rxq_params.flags & QUEUE_FLG_HC),
6772                         hc_usec);
6773
6774         bnx2x_set_ctx_validation(params->rxq_params.cxt,
6775                                  params->rxq_params.cid);
6776
6777         /* zero stats */
6778         if (params->txq_params.flags & QUEUE_FLG_STATS)
6779                 storm_memset_xstats_zero(bp, BP_PORT(bp),
6780                                          params->txq_params.stat_id);
6781
6782         if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6783                 storm_memset_ustats_zero(bp, BP_PORT(bp),
6784                                          params->rxq_params.stat_id);
6785                 storm_memset_tstats_zero(bp, BP_PORT(bp),
6786                                          params->rxq_params.stat_id);
6787         }
6788
6789         /* Fill the ramrod data */
6790         bnx2x_fill_cl_init_data(bp, params, activate, data);
6791
6792         /* SETUP ramrod.
6793          *
	 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
	 * barrier other than mmiowb() is needed to impose a proper
	 * ordering of memory operations.
6797          */
6798         mmiowb();
6799
6801         bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6802                       U64_HI(data_mapping), U64_LO(data_mapping), 0);
6803
6804         /* Wait for completion */
6805         rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6806                                  params->ramrod_params.index,
6807                                  params->ramrod_params.pstate,
6808                                  ramrod_flags);
6809         return rc;
6810 }
6811
6812 /**
6813  * Configure interrupt mode according to current configuration.
6814  * In case of MSI-X it will also try to enable MSI-X.
6815  *
6816  * @param bp
6817  *
6818  * @return int
6819  */
6820 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6821 {
6822         int rc = 0;
6823
6824         switch (bp->int_mode) {
6825         case INT_MODE_MSI:
6826                 bnx2x_enable_msi(bp);
6827                 /* falling through... */
6828         case INT_MODE_INTx:
6829                 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6830                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6831                 break;
6832         default:
6833                 /* Set number of queues according to bp->multi_mode value */
6834                 bnx2x_set_num_queues(bp);
6835
6836                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6837                    bp->num_queues);
6838
6839         /* try to enable MSI-X with the requested number of fp's;
6840          * if we can't use MSI-X we only need one fp, so fall back
6841          * to MSI or legacy INTx with a single fp
6842          */
6843                 rc = bnx2x_enable_msix(bp);
6844                 if (rc) {
6845                         /* failed to enable MSI-X */
6846                         if (bp->multi_mode)
6847                                 DP(NETIF_MSG_IFUP,
6848                                           "Multi requested but failed to "
6849                                           "enable MSI-X (%d), "
6850                                           "set number of queues to %d\n",
6851                                    bp->num_queues,
6852                                    1 + NONE_ETH_CONTEXT_USE);
6853                         bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6854
6855                         if (!(bp->flags & DISABLE_MSI_FLAG))
6856                                 bnx2x_enable_msi(bp);
6857                 }
6858
6859                 break;
6860         }
6861
6862         return rc;
6863 }
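
/* Sketch of the resulting fallback ladder (int_mode is assumed to be set
 * from a module parameter; the names below mirror the switch above):
 *   INT_MODE_MSI  -> MSI with a single queue
 *   INT_MODE_INTx -> legacy INTx with a single queue
 *   default       -> try MSI-X with bp->num_queues, fall back to MSI
 *                    (unless DISABLE_MSI_FLAG) or INTx with one queue
 */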
6864
6865 /* must be called prior to any HW initializations */
6866 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6867 {
6868         return L2_ILT_LINES(bp);
6869 }
6870
6871 void bnx2x_ilt_set_info(struct bnx2x *bp)
6872 {
6873         struct ilt_client_info *ilt_client;
6874         struct bnx2x_ilt *ilt = BP_ILT(bp);
6875         u16 line = 0;
6876
6877         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6878         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6879
6880         /* CDU */
6881         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6882         ilt_client->client_num = ILT_CLIENT_CDU;
6883         ilt_client->page_size = CDU_ILT_PAGE_SZ;
6884         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6885         ilt_client->start = line;
6886         line += L2_ILT_LINES(bp);
6887 #ifdef BCM_CNIC
6888         line += CNIC_ILT_LINES;
6889 #endif
6890         ilt_client->end = line - 1;
6891
6892         DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6893                                          "flags 0x%x, hw psz %d\n",
6894            ilt_client->start,
6895            ilt_client->end,
6896            ilt_client->page_size,
6897            ilt_client->flags,
6898            ilog2(ilt_client->page_size >> 12));
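        /* The "hw psz" printed above encodes the page size as a power of
         * two above 4KB, e.g. a 32KB ILT page gives ilog2(0x8000 >> 12) = 3.
         */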
6899
6900         /* QM */
6901         if (QM_INIT(bp->qm_cid_count)) {
6902                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6903                 ilt_client->client_num = ILT_CLIENT_QM;
6904                 ilt_client->page_size = QM_ILT_PAGE_SZ;
6905                 ilt_client->flags = 0;
6906                 ilt_client->start = line;
6907
6908                 /* 4 bytes for each cid */
6909                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6910                                                          QM_ILT_PAGE_SZ);
6911
6912                 ilt_client->end = line - 1;
6913
6914                 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6915                                                  "flags 0x%x, hw psz %d\n",
6916                    ilt_client->start,
6917                    ilt_client->end,
6918                    ilt_client->page_size,
6919                    ilt_client->flags,
6920                    ilog2(ilt_client->page_size >> 12));
6921
6922         }
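        /* Worked example for the QM sizing above (illustrative values,
         * assuming qm_cid_count = 64, QM_QUEUES_PER_FUNC = 16 and
         * QM_ILT_PAGE_SZ = 0x1000): DIV_ROUND_UP(64 * 16 * 4, 0x1000) = 1,
         * i.e. a single ILT line.
         */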
6923         /* SRC */
6924         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6925 #ifdef BCM_CNIC
6926         ilt_client->client_num = ILT_CLIENT_SRC;
6927         ilt_client->page_size = SRC_ILT_PAGE_SZ;
6928         ilt_client->flags = 0;
6929         ilt_client->start = line;
6930         line += SRC_ILT_LINES;
6931         ilt_client->end = line - 1;
6932
6933         DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6934                                          "flags 0x%x, hw psz %d\n",
6935            ilt_client->start,
6936            ilt_client->end,
6937            ilt_client->page_size,
6938            ilt_client->flags,
6939            ilog2(ilt_client->page_size >> 12));
6940
6941 #else
6942         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6943 #endif
6944
6945         /* TM */
6946         ilt_client = &ilt->clients[ILT_CLIENT_TM];
6947 #ifdef BCM_CNIC
6948         ilt_client->client_num = ILT_CLIENT_TM;
6949         ilt_client->page_size = TM_ILT_PAGE_SZ;
6950         ilt_client->flags = 0;
6951         ilt_client->start = line;
6952         line += TM_ILT_LINES;
6953         ilt_client->end = line - 1;
6954
6955         DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6956                                          "flags 0x%x, hw psz %d\n",
6957            ilt_client->start,
6958            ilt_client->end,
6959            ilt_client->page_size,
6960            ilt_client->flags,
6961            ilog2(ilt_client->page_size >> 12));
6962
6963 #else
6964         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6965 #endif
6966 }
6967
6968 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6969                        int is_leading)
6970 {
6971         struct bnx2x_client_init_params params = { {0} };
6972         int rc;
6973
6974         /* reset IGU state; skip for the FCoE L2 queue */
6975         if (!IS_FCOE_FP(fp))
6976                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6977                              IGU_INT_ENABLE, 0);
6978
6979         params.ramrod_params.pstate = &fp->state;
6980         params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6981         params.ramrod_params.index = fp->index;
6982         params.ramrod_params.cid = fp->cid;
6983
6984 #ifdef BCM_CNIC
6985         if (IS_FCOE_FP(fp))
6986                 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6987
6988 #endif
6989
6990         if (is_leading)
6991                 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6992
6993         bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6994
6995         bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6996
6997         rc = bnx2x_setup_fw_client(bp, &params, 1,
6998                                      bnx2x_sp(bp, client_init_data),
6999                                      bnx2x_sp_mapping(bp, client_init_data));
7000         return rc;
7001 }
7002
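/* Client teardown below follows a fixed three-step ramrod sequence, with a
 * synchronous wait after each step:
 *   OPEN -> HALTING -> HALTED           (ETH_HALT ramrod)
 *        -> TERMINATING -> TERMINATED   (ETH_TERMINATE ramrod)
 *        -> CLOSED                      (COMMON_CFC_DEL ramrod)
 */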
7003 static int bnx2x_stop_fw_client(struct bnx2x *bp,
7004                                 struct bnx2x_client_ramrod_params *p)
7005 {
7006         int rc;
7007
7008         int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
7009
7010         /* halt the connection */
7011         *p->pstate = BNX2X_FP_STATE_HALTING;
7012         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
7013                                                   p->cl_id, 0);
7014
7015         /* Wait for completion */
7016         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
7017                                p->pstate, poll_flag);
7018         if (rc) /* timeout */
7019                 return rc;
7020
7021         *p->pstate = BNX2X_FP_STATE_TERMINATING;
7022         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
7023                                                        p->cl_id, 0);
7024         /* Wait for completion */
7025         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
7026                                p->pstate, poll_flag);
7027         if (rc) /* timeout */
7028                 return rc;
7029
7030
7031         /* delete cfc entry */
7032         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
7033
7034         /* Wait for completion */
7035         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
7036                                p->pstate, WAIT_RAMROD_COMMON);
7037         return rc;
7038 }
7039
7040 static int bnx2x_stop_client(struct bnx2x *bp, int index)
7041 {
7042         struct bnx2x_client_ramrod_params client_stop = {0};
7043         struct bnx2x_fastpath *fp = &bp->fp[index];
7044
7045         client_stop.index = index;
7046         client_stop.cid = fp->cid;
7047         client_stop.cl_id = fp->cl_id;
7048         client_stop.pstate = &(fp->state);
7049         client_stop.poll = 0;
7050
7051         return bnx2x_stop_fw_client(bp, &client_stop);
7052 }
7053
7054
7055 static void bnx2x_reset_func(struct bnx2x *bp)
7056 {
7057         int port = BP_PORT(bp);
7058         int func = BP_FUNC(bp);
7059         int i;
7060         int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
7061                         (CHIP_IS_E2(bp) ?
7062                          offsetof(struct hc_status_block_data_e2, common) :
7063                          offsetof(struct hc_status_block_data_e1x, common));
7064         int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
7065         int pfid_offset = offsetof(struct pci_entity, pf_id);
7066
7067         /* Disable the function in the FW */
7068         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
7069         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
7070         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
7071         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
7072
7073         /* FP SBs */
7074         for_each_eth_queue(bp, i) {
7075                 struct bnx2x_fastpath *fp = &bp->fp[i];
7076                 REG_WR8(bp,
7077                         BAR_CSTRORM_INTMEM +
7078                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
7079                         + pfunc_offset_fp + pfid_offset,
7080                         HC_FUNCTION_DISABLED);
7081         }
7082
7083         /* SP SB */
7084         REG_WR8(bp,
7085                 BAR_CSTRORM_INTMEM +
7086                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
7087                 pfunc_offset_sp + pfid_offset,
7088                 HC_FUNCTION_DISABLED);
7089
7090
7091         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
7092                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
7093                        0);
7094
7095         /* Configure IGU */
7096         if (bp->common.int_block == INT_BLOCK_HC) {
7097                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7098                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7099         } else {
7100                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7101                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7102         }
7103
7104 #ifdef BCM_CNIC
7105         /* Disable Timer scan */
7106         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7107         /*
7108          * Wait for at least 10ms and up to 2 seconds for the timer scan to
7109          * complete
7110          */
7111         for (i = 0; i < 200; i++) {
7112                 msleep(10);
7113                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7114                         break;
7115         }
7116 #endif
7117         /* Clear ILT */
7118         bnx2x_clear_func_ilt(bp, func);
7119
7120         /* Timer-block workaround for an E2 bug: if this is vnic-3,
7121          * we need to clear the entire ILT range for the timers.
7122          */
7123         if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7124                 struct ilt_client_info ilt_cli;
7125                 /* use dummy TM client */
7126                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7127                 ilt_cli.start = 0;
7128                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7129                 ilt_cli.client_num = ILT_CLIENT_TM;
7130
7131                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7132         }
7133
7134         /* this assumes that reset_port() is called before reset_func() */
7135         if (CHIP_IS_E2(bp))
7136                 bnx2x_pf_disable(bp);
7137
7138         bp->dmae_ready = 0;
7139 }
7140
7141 static void bnx2x_reset_port(struct bnx2x *bp)
7142 {
7143         int port = BP_PORT(bp);
7144         u32 val;
7145
7146         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7147
7148         /* Do not rcv packets to BRB */
7149         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7150         /* Do not direct rcv packets that are not for MCP to the BRB */
7151         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7152                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7153
7154         /* Configure AEU */
7155         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7156
7157         msleep(100);
7158         /* Check for BRB port occupancy */
7159         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7160         if (val)
7161                 DP(NETIF_MSG_IFDOWN,
7162                    "BRB1 is not empty, %d blocks are occupied\n", val);
7163
7164         /* TODO: Close Doorbell port? */
7165 }
7166
7167 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7168 {
7169         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7170            BP_ABS_FUNC(bp), reset_code);
7171
7172         switch (reset_code) {
7173         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7174                 bnx2x_reset_port(bp);
7175                 bnx2x_reset_func(bp);
7176                 bnx2x_reset_common(bp);
7177                 break;
7178
7179         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7180                 bnx2x_reset_port(bp);
7181                 bnx2x_reset_func(bp);
7182                 break;
7183
7184         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7185                 bnx2x_reset_func(bp);
7186                 break;
7187
7188         default:
7189                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7190                 break;
7191         }
7192 }
7193
7194 #ifdef BCM_CNIC
7195 static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7196 {
7197         if (bp->flags & FCOE_MACS_SET) {
7198                 if (!IS_MF_SD(bp))
7199                         bnx2x_set_fip_eth_mac_addr(bp, 0);
7200
7201                 bnx2x_set_all_enode_macs(bp, 0);
7202
7203                 bp->flags &= ~FCOE_MACS_SET;
7204         }
7205 }
7206 #endif
7207
7208 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7209 {
7210         int port = BP_PORT(bp);
7211         u32 reset_code = 0;
7212         int i, cnt, rc;
7213
7214         /* Wait until tx fastpath tasks complete */
7215         for_each_tx_queue(bp, i) {
7216                 struct bnx2x_fastpath *fp = &bp->fp[i];
7217
7218                 cnt = 1000;
7219                 while (bnx2x_has_tx_work_unload(fp)) {
7220
7221                         if (!cnt) {
7222                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7223                                           i);
7224 #ifdef BNX2X_STOP_ON_ERROR
7225                                 bnx2x_panic();
7226                                 return -EBUSY;
7227 #else
7228                                 break;
7229 #endif
7230                         }
7231                         cnt--;
7232                         msleep(1);
7233                 }
7234         }
7235         /* Give HW time to discard old tx messages */
7236         msleep(1);
7237
7238         bnx2x_set_eth_mac(bp, 0);
7239
7240         bnx2x_invalidate_uc_list(bp);
7241
7242         if (CHIP_IS_E1(bp))
7243                 bnx2x_invalidate_e1_mc_list(bp);
7244         else {
7245                 bnx2x_invalidate_e1h_mc_list(bp);
7246                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7247         }
7248
7249 #ifdef BCM_CNIC
7250         bnx2x_del_fcoe_eth_macs(bp);
7251 #endif
7252
7253         if (unload_mode == UNLOAD_NORMAL)
7254                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7255
7256         else if (bp->flags & NO_WOL_FLAG)
7257                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7258
7259         else if (bp->wol) {
7260                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7261                 u8 *mac_addr = bp->dev->dev_addr;
7262                 u32 val;
7263                 /* The MAC address is written to entries 1-4 to
7264                    preserve entry 0, which is used by the PMF */
7265                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7266
7267                 val = (mac_addr[0] << 8) | mac_addr[1];
7268                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7269
7270                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7271                       (mac_addr[4] << 8) | mac_addr[5];
7272                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
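                /* Byte-packing example: for MAC 00:11:22:33:44:55 the two
                 * writes above are 0x00000011 and 0x22334455 respectively.
                 */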
7273
7274                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7275
7276         } else
7277                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7278
7279         /* Close multi and leading connections;
7280            completions for the ramrods are collected synchronously */
7281         for_each_queue(bp, i)
7282
7283                 if (bnx2x_stop_client(bp, i))
7284 #ifdef BNX2X_STOP_ON_ERROR
7285                         return;
7286 #else
7287                         goto unload_error;
7288 #endif
7289
7290         rc = bnx2x_func_stop(bp);
7291         if (rc) {
7292                 BNX2X_ERR("Function stop failed!\n");
7293 #ifdef BNX2X_STOP_ON_ERROR
7294                 return;
7295 #else
7296                 goto unload_error;
7297 #endif
7298         }
7299 #ifndef BNX2X_STOP_ON_ERROR
7300 unload_error:
7301 #endif
7302         if (!BP_NOMCP(bp))
7303                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
7304         else {
7305                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
7306                                      "%d, %d, %d\n", BP_PATH(bp),
7307                    load_count[BP_PATH(bp)][0],
7308                    load_count[BP_PATH(bp)][1],
7309                    load_count[BP_PATH(bp)][2]);
7310                 load_count[BP_PATH(bp)][0]--;
7311                 load_count[BP_PATH(bp)][1 + port]--;
7312                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
7313                                      "%d, %d, %d\n", BP_PATH(bp),
7314                    load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7315                    load_count[BP_PATH(bp)][2]);
7316                 if (load_count[BP_PATH(bp)][0] == 0)
7317                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7318                 else if (load_count[BP_PATH(bp)][1 + port] == 0)
7319                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7320                 else
7321                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7322         }
7323
7324         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7325             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7326                 bnx2x__link_reset(bp);
7327
7328         /* Disable HW interrupts, NAPI */
7329         bnx2x_netif_stop(bp, 1);
7330
7331         /* Release IRQs */
7332         bnx2x_free_irq(bp);
7333
7334         /* Reset the chip */
7335         bnx2x_reset_chip(bp, reset_code);
7336
7337         /* Report UNLOAD_DONE to MCP */
7338         if (!BP_NOMCP(bp))
7339                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7340
7341 }
7342
7343 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
7344 {
7345         u32 val;
7346
7347         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7348
7349         if (CHIP_IS_E1(bp)) {
7350                 int port = BP_PORT(bp);
7351                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7352                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
7353
7354                 val = REG_RD(bp, addr);
7355                 val &= ~(0x300);
7356                 REG_WR(bp, addr, val);
7357         } else if (CHIP_IS_E1H(bp)) {
7358                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7359                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7360                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7361                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7362         }
7363 }
7364
7365 /* Close gates #2, #3 and #4: */
7366 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7367 {
7368         u32 val, addr;
7369
7370         /* Gates #2 and #4a are closed/opened for "not E1" only */
7371         if (!CHIP_IS_E1(bp)) {
7372                 /* #4 */
7373                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7374                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7375                        close ? (val | 0x1) : (val & (~(u32)1)));
7376                 /* #2 */
7377                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7378                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7379                        close ? (val | 0x1) : (val & (~(u32)1)));
7380         }
7381
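        /* Note the inverted polarity for gate #3 below: the HC config bit
         * is set when opening the gate and cleared when closing it.
         */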
7382         /* #3 */
7383         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7384         val = REG_RD(bp, addr);
7385         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7386
7387         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7388                 close ? "closing" : "opening");
7389         mmiowb();
7390 }
7391
7392 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
7393
7394 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7395 {
7396         /* Do some magic... */
7397         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7398         *magic_val = val & SHARED_MF_CLP_MAGIC;
7399         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7400 }
7401
7402 /* Restore the value of the `magic' bit.
7403  *
7404  * @param bp Driver handle.
7405  * @param magic_val Old value of the `magic' bit.
7406  */
7407 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7408 {
7409         /* Restore the `magic' bit value... */
7410         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7411         MF_CFG_WR(bp, shared_mf_config.clp_mb,
7412                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7413 }
7414
7415 /**
7416  * Prepares for MCP reset: takes care of CLP configurations.
7417  *
7418  * @param bp
7419  * @param magic_val Old value of 'magic' bit.
7420  */
7421 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7422 {
7423         u32 shmem;
7424         u32 validity_offset;
7425
7426         DP(NETIF_MSG_HW, "Starting\n");
7427
7428         /* Set `magic' bit in order to save MF config */
7429         if (!CHIP_IS_E1(bp))
7430                 bnx2x_clp_reset_prep(bp, magic_val);
7431
7432         /* Get shmem offset */
7433         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7434         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7435
7436         /* Clear validity map flags */
7437         if (shmem > 0)
7438                 REG_WR(bp, shmem + validity_offset, 0);
7439 }
7440
7441 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
7442 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
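
/* The polling loop in bnx2x_reset_mcp_comp() runs MCP_TIMEOUT /
 * MCP_ONE_TIMEOUT = 50 iterations, covering the full 5s window (each
 * iteration sleeps 10 times longer on slow emulation/FPGA platforms).
 */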
7443
7444 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7445  * depending on the HW type.
7446  *
7447  * @param bp
7448  */
7449 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7450 {
7451         /* special handling for emulation and FPGA,
7452            wait 10 times longer */
7453         if (CHIP_REV_IS_SLOW(bp))
7454                 msleep(MCP_ONE_TIMEOUT*10);
7455         else
7456                 msleep(MCP_ONE_TIMEOUT);
7457 }
7458
7459 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7460 {
7461         u32 shmem, cnt, validity_offset, val;
7462         int rc = 0;
7463
7464         msleep(100);
7465
7466         /* Get shmem offset */
7467         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7468         if (shmem == 0) {
7469                 BNX2X_ERR("Shmem base address is 0\n");
7470                 rc = -ENOTTY;
7471                 goto exit_lbl;
7472         }
7473
7474         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7475
7476         /* Wait for MCP to come up */
7477         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7478                 /* TBD: it's best to check the validity map of the last
7479                  * port; currently port 0 is checked.
7480                  */
7481                 val = REG_RD(bp, shmem + validity_offset);
7482                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7483                    shmem + validity_offset, val);
7484
7485                 /* check that shared memory is valid. */
7486                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7487                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7488                         break;
7489
7490                 bnx2x_mcp_wait_one(bp);
7491         }
7492
7493         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7494
7495         /* Check that shared memory is valid. This indicates that MCP is up. */
7496         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7497             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7498                 BNX2X_ERR("Shmem signature not present. MCP is not up!!\n");
7499                 rc = -ENOTTY;
7500                 goto exit_lbl;
7501         }
7502
7503 exit_lbl:
7504         /* Restore the `magic' bit value */
7505         if (!CHIP_IS_E1(bp))
7506                 bnx2x_clp_reset_done(bp, magic_val);
7507
7508         return rc;
7509 }
7510
7511 static void bnx2x_pxp_prep(struct bnx2x *bp)
7512 {
7513         if (!CHIP_IS_E1(bp)) {
7514                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7515                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7516                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7517                 mmiowb();
7518         }
7519 }
7520
7521 /*
7522  * Reset the whole chip except for:
7523  *      - PCIE core
7524  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7525  *              one reset bit)
7526  *      - IGU
7527  *      - MISC (including AEU)
7528  *      - GRC
7529  *      - RBCN, RBCP
7530  */
7531 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7532 {
7533         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7534
7535         not_reset_mask1 =
7536                 MISC_REGISTERS_RESET_REG_1_RST_HC |
7537                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7538                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7539
7540         not_reset_mask2 =
7541                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7542                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7543                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7544                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7545                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7546                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
7547                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7548                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7549
7550         reset_mask1 = 0xffffffff;
7551
7552         if (CHIP_IS_E1(bp))
7553                 reset_mask2 = 0xffff;
7554         else
7555                 reset_mask2 = 0x1ffff;
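        /* The two masks above presumably match the width of reset
         * register 2: 16 valid bits on E1, 17 on later chips.
         */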
7556
7557         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7558                reset_mask1 & (~not_reset_mask1));
7559         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7560                reset_mask2 & (~not_reset_mask2));
7561
7562         barrier();
7563         mmiowb();
7564
7565         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7566         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7567         mmiowb();
7568 }
7569
7570 static int bnx2x_process_kill(struct bnx2x *bp)
7571 {
7572         int cnt = 1000;
7573         u32 val = 0;
7574         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7575
7576
7577         /* Empty the Tetris buffer, wait for 1s */
7578         do {
7579                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7580                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7581                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7582                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7583                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7584                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7585                     ((port_is_idle_0 & 0x1) == 0x1) &&
7586                     ((port_is_idle_1 & 0x1) == 0x1) &&
7587                     (pgl_exp_rom2 == 0xffffffff))
7588                         break;
7589                 msleep(1);
7590         } while (cnt-- > 0);
7591
7592         if (cnt <= 0) {
7593                 DP(NETIF_MSG_HW, "Tetris buffer didn't drain or there"
7594                           " are still"
7595                           " outstanding read requests after 1s!\n");
7596                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7597                           " port_is_idle_0=0x%08x,"
7598                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7599                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7600                           pgl_exp_rom2);
7601                 return -EAGAIN;
7602         }
7603
7604         barrier();
7605
7606         /* Close gates #2, #3 and #4 */
7607         bnx2x_set_234_gates(bp, true);
7608
7609         /* TBD: Indicate that "process kill" is in progress to MCP */
7610
7611         /* Clear "unprepared" bit */
7612         REG_WR(bp, MISC_REG_UNPREPARED, 0);
7613         barrier();
7614
7615         /* Make sure all is written to the chip before the reset */
7616         mmiowb();
7617
7618         /* Wait for 1ms to empty GLUE and PCI-E core queues,
7619          * PSWHST, GRC and PSWRD Tetris buffer.
7620          */
7621         msleep(1);
7622
7623         /* Prepare to chip reset: */
7624         /* MCP */
7625         bnx2x_reset_mcp_prep(bp, &val);
7626
7627         /* PXP */
7628         bnx2x_pxp_prep(bp);
7629         barrier();
7630
7631         /* reset the chip */
7632         bnx2x_process_kill_chip_reset(bp);
7633         barrier();
7634
7635         /* Recover after reset: */
7636         /* MCP */
7637         if (bnx2x_reset_mcp_comp(bp, val))
7638                 return -EAGAIN;
7639
7640         /* PXP */
7641         bnx2x_pxp_prep(bp);
7642
7643         /* Open the gates #2, #3 and #4 */
7644         bnx2x_set_234_gates(bp, false);
7645
7646         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
7647          * reset state, re-enable attentions. */
7648
7649         return 0;
7650 }
7651
7652 static int bnx2x_leader_reset(struct bnx2x *bp)
7653 {
7654         int rc = 0;
7655         /* Try to recover after the failure */
7656         if (bnx2x_process_kill(bp)) {
7657                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7658                        bp->dev->name);
7659                 rc = -EAGAIN;
7660                 goto exit_leader_reset;
7661         }
7662
7663         /* Clear "reset is in progress" bit and update the driver state */
7664         bnx2x_set_reset_done(bp);
7665         bp->recovery_state = BNX2X_RECOVERY_DONE;
7666
7667 exit_leader_reset:
7668         bp->is_leader = 0;
7669         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7670         smp_wmb();
7671         return rc;
7672 }
7673
7674 /* Assumption: runs under rtnl lock. This together with the fact
7675  * that it's called only from bnx2x_reset_task() ensures that it
7676  * will never be called when netif_running(bp->dev) is false.
7677  */
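/* Rough sketch of the recovery state machine below (the "leader" is the
 * function holding the LEADER_LOCK HW lock):
 *   INIT: grab leadership if possible, unload the NIC, go to WAIT.
 *   WAIT, leader: once the global load count drops to zero, run
 *       bnx2x_leader_reset() and reload; otherwise re-schedule the task.
 *   WAIT, non-leader: wait for the leader to finish the "process kill"
 *       (or take over leadership if it goes away), then reload.
 */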
7678 static void bnx2x_parity_recover(struct bnx2x *bp)
7679 {
7680         DP(NETIF_MSG_HW, "Handling parity\n");
7681         while (1) {
7682                 switch (bp->recovery_state) {
7683                 case BNX2X_RECOVERY_INIT:
7684                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7685                         /* Try to get a LEADER_LOCK HW lock */
7686                         if (bnx2x_trylock_hw_lock(bp,
7687                                 HW_LOCK_RESOURCE_RESERVED_08))
7688                                 bp->is_leader = 1;
7689
7690                         /* Stop the driver */
7691                         /* If interface has been removed - break */
7692                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7693                                 return;
7694
7695                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
7696                         /* Ensure "is_leader" and "recovery_state"
7697                          *  update values are seen on other CPUs
7698                          */
7699                         smp_wmb();
7700                         break;
7701
7702                 case BNX2X_RECOVERY_WAIT:
7703                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7704                         if (bp->is_leader) {
7705                                 u32 load_counter = bnx2x_get_load_cnt(bp);
7706                                 if (load_counter) {
7707                                         /* Wait until all other functions get
7708                                          * down.
7709                                          */
7710                                         schedule_delayed_work(&bp->reset_task,
7711                                                                 HZ/10);
7712                                         return;
7713                                 } else {
7714                                         /* If all other functions got down -
7715                                          * try to bring the chip back to
7716                                          * normal. In any case it's an exit
7717                                          * point for a leader.
7718                                          */
7719                                         if (bnx2x_leader_reset(bp) ||
7720                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
7721                                                 printk(KERN_ERR"%s: Recovery "
7722                                                 "has failed. Power cycle is "
7723                                                 "needed.\n", bp->dev->name);
7724                                                 /* Disconnect this device */
7725                                                 netif_device_detach(bp->dev);
7726                                                 /* Block ifup for all function
7727                                                  * of this ASIC until
7728                                                  * "process kill" or power
7729                                                  * cycle.
7730                                                  */
7731                                                 bnx2x_set_reset_in_progress(bp);
7732                                                 /* Shut down the power */
7733                                                 bnx2x_set_power_state(bp,
7734                                                                 PCI_D3hot);
7735                                                 return;
7736                                         }
7737
7738                                         return;
7739                                 }
7740                         } else { /* non-leader */
7741                                 if (!bnx2x_reset_is_done(bp)) {
7742                                         /* Try to get the LEADER_LOCK HW
7743                                          * lock, since a former leader may
7744                                          * have been unloaded by the user
7745                                          * or released leadership for
7746                                          * another reason.
7747                                          */
7748                                         if (bnx2x_trylock_hw_lock(bp,
7749                                             HW_LOCK_RESOURCE_RESERVED_08)) {
7750                                                 /* I'm a leader now! Restart a
7751                                                  * switch case.
7752                                                  */
7753                                                 bp->is_leader = 1;
7754                                                 break;
7755                                         }
7756
7757                                         schedule_delayed_work(&bp->reset_task,
7758                                                                 HZ/10);
7759                                         return;
7760
7761                                 } else { /* A leader has completed
7762                                           * the "process kill". It's an exit
7763                                           * point for a non-leader.
7764                                           */
7765                                         bnx2x_nic_load(bp, LOAD_NORMAL);
7766                                         bp->recovery_state =
7767                                                 BNX2X_RECOVERY_DONE;
7768                                         smp_wmb();
7769                                         return;
7770                                 }
7771                         }
7772                 default:
7773                         return;
7774                 }
7775         }
7776 }
7777
7778 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7779  * scheduled on a general queue in order to prevent a deadlock.
7780  */
7781 static void bnx2x_reset_task(struct work_struct *work)
7782 {
7783         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7784
7785 #ifdef BNX2X_STOP_ON_ERROR
7786         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7787                   " so reset not done to allow debug dump,\n"
7788          KERN_ERR " you will need to reboot when done\n");
7789         return;
7790 #endif
7791
7792         rtnl_lock();
7793
7794         if (!netif_running(bp->dev))
7795                 goto reset_task_exit;
7796
7797         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7798                 bnx2x_parity_recover(bp);
7799         else {
7800                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7801                 bnx2x_nic_load(bp, LOAD_NORMAL);
7802         }
7803
7804 reset_task_exit:
7805         rtnl_unlock();
7806 }
7807
7808 /* end of nic load/unload */
7809
7810 /*
7811  * Init service functions
7812  */
7813
7814 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7815 {
7816         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7817         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7818         return base + (BP_ABS_FUNC(bp)) * stride;
7819 }
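
/* Illustrative example: for absolute function 2 this resolves to
 * PXP2_REG_PGL_PRETEND_FUNC_F0 + 2 * stride, i.e. the pretend registers
 * are assumed to sit at a fixed per-function stride starting at F0.
 */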
7820
7821 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7822 {
7823         u32 reg = bnx2x_get_pretend_reg(bp);
7824
7825         /* Flush all outstanding writes */
7826         mmiowb();
7827
7828         /* Pretend to be function 0 */
7829         REG_WR(bp, reg, 0);
7830         REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
7831
7832         /* From now we are in the "like-E1" mode */
7833         bnx2x_int_disable(bp);
7834
7835         /* Flush all outstanding writes */
7836         mmiowb();
7837
7838         /* Restore the original function */
7839         REG_WR(bp, reg, BP_ABS_FUNC(bp));
7840         REG_RD(bp, reg);
7841 }
7842
7843 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7844 {
7845         if (CHIP_IS_E1(bp))
7846                 bnx2x_int_disable(bp);
7847         else
7848                 bnx2x_undi_int_disable_e1h(bp);
7849 }
7850
7851 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7852 {
7853         u32 val;
7854
7855         /* Check if there is any driver already loaded */
7856         val = REG_RD(bp, MISC_REG_UNPREPARED);
7857         if (val == 0x1) {
7858                 /* Check if it is the UNDI driver; UNDI initializes the
7859                  * CID offset for the normal doorbell to 0x7
7860                  */
7861                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7862                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7863                 if (val == 0x7) {
7864                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7865                         /* save our pf_num */
7866                         int orig_pf_num = bp->pf_num;
7867                         u32 swap_en;
7868                         u32 swap_val;
7869
7870                         /* clear the UNDI indication */
7871                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7872
7873                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7874
7875                         /* try to unload UNDI on port 0 */
7876                         bp->pf_num = 0;
7877                         bp->fw_seq =
7878                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7879                                 DRV_MSG_SEQ_NUMBER_MASK);
7880                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
7881
7882                         /* if UNDI is loaded on the other port */
7883                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7884
7885                                 /* send "DONE" for previous unload */
7886                                 bnx2x_fw_command(bp,
7887                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
7888
7889                                 /* unload UNDI on port 1 */
7890                                 bp->pf_num = 1;
7891                                 bp->fw_seq =
7892                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7893                                         DRV_MSG_SEQ_NUMBER_MASK);
7894                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7895
7896                                 bnx2x_fw_command(bp, reset_code, 0);
7897                         }
7898
7899                         /* now it's safe to release the lock */
7900                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7901
7902                         bnx2x_undi_int_disable(bp);
7903
7904                         /* close input traffic and wait for it */
7905                         /* Do not rcv packets to BRB */
7906                         REG_WR(bp,
7907                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7908                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7909                         /* Do not direct rcv packets that are not for MCP to
7910                          * the BRB */
7911                         REG_WR(bp,
7912                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7913                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7914                         /* clear AEU */
7915                         REG_WR(bp,
7916                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7917                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7918                         msleep(10);
7919
7920                         /* save NIG port swap info */
7921                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7922                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7923                         /* reset device */
7924                         REG_WR(bp,
7925                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7926                                0xd3ffffff);
7927                         REG_WR(bp,
7928                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7929                                0x1403);
7930                         /* take the NIG out of reset and restore swap values */
7931                         REG_WR(bp,
7932                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7933                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7934                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7935                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7936
7937                         /* send unload done to the MCP */
7938                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7939
7940                         /* restore our func and fw_seq */
7941                         bp->pf_num = orig_pf_num;
7942                         bp->fw_seq =
7943                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7944                                 DRV_MSG_SEQ_NUMBER_MASK);
7945                 } else
7946                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7947         }
7948 }
7949
7950 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7951 {
7952         u32 val, val2, val3, val4, id;
7953         u16 pmc;
7954
7955         /* Get the chip revision id and number. */
7956         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7957         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7958         id = ((val & 0xffff) << 16);
7959         val = REG_RD(bp, MISC_REG_CHIP_REV);
7960         id |= ((val & 0xf) << 12);
7961         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7962         id |= ((val & 0xff) << 4);
7963         val = REG_RD(bp, MISC_REG_BOND_ID);
7964         id |= (val & 0xf);
7965         bp->common.chip_id = id;
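        /* Packing example for the id assembled above: a chip num of
         * 0x164e with rev, metal and bond_id all zero yields a chip_id
         * of 0x164e0000.
         */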
7966
7967         /* Set doorbell size */
7968         bp->db_size = (1 << BNX2X_DB_SHIFT);
7969
7970         if (CHIP_IS_E2(bp)) {
7971                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7972                 if ((val & 1) == 0)
7973                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7974                 else
7975                         val = (val >> 1) & 1;
7976                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7977                                                        "2_PORT_MODE");
7978                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7979                                                  CHIP_2_PORT_MODE;
7980
7981                 if (CHIP_MODE_IS_4_PORT(bp))
7982                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
7983                 else
7984                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
7985         } else {
7986                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7987                 bp->pfid = bp->pf_num;                  /* 0..7 */
7988         }
7989
7990         /*
7991          * set base FW non-default (fast path) status block id, this value is
7992          * used to initialize the fw_sb_id saved on the fp/queue structure to
7993          * determine the id used by the FW.
7994          */
7995         if (CHIP_IS_E1x(bp))
7996                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7997         else /* E2 */
7998                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7999
8000         bp->link_params.chip_id = bp->common.chip_id;
8001         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8002
8003         val = (REG_RD(bp, 0x2874) & 0x55);
8004         if ((bp->common.chip_id & 0x1) ||
8005             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8006                 bp->flags |= ONE_PORT_FLAG;
8007                 BNX2X_DEV_INFO("single port device\n");
8008         }
8009
8010         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8011         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8012                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8013         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8014                        bp->common.flash_size, bp->common.flash_size);
8015
8016         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8017         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
8018                                         MISC_REG_GENERIC_CR_1 :
8019                                         MISC_REG_GENERIC_CR_0));
8020         bp->link_params.shmem_base = bp->common.shmem_base;
8021         bp->link_params.shmem2_base = bp->common.shmem2_base;
8022         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8023                        bp->common.shmem_base, bp->common.shmem2_base);
8024
8025         if (!bp->common.shmem_base) {
8026                 BNX2X_DEV_INFO("MCP not active\n");
8027                 bp->flags |= NO_MCP_FLAG;
8028                 return;
8029         }
8030
8031         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8032         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8033                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8034                 BNX2X_ERR("BAD MCP validity signature\n");
8035
8036         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8037         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8038
8039         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8040                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8041                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8042
8043         bp->link_params.feature_config_flags = 0;
8044         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8045         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8046                 bp->link_params.feature_config_flags |=
8047                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8048         else
8049                 bp->link_params.feature_config_flags &=
8050                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8051
8052         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8053         bp->common.bc_ver = val;
8054         BNX2X_DEV_INFO("bc_ver %X\n", val);
8055         if (val < BNX2X_BC_VER) {
8056                 /* for now only warn;
8057                  * later we might need to enforce this */
8058                 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
8059                           "please upgrade BC\n", BNX2X_BC_VER, val);
8060         }
8061         bp->link_params.feature_config_flags |=
8062                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
8063                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8064
8065         bp->link_params.feature_config_flags |=
8066                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
8067                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
8068
8069         if (BP_E1HVN(bp) == 0) {
8070                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8071                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8072         } else {
8073                 /* no WOL capability for E1HVN != 0 */
8074                 bp->flags |= NO_WOL_FLAG;
8075         }
8076         BNX2X_DEV_INFO("%sWoL capable\n",
8077                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8078
8079         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8080         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8081         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8082         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8083
8084         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
8085                  val, val2, val3, val4);
8086 }
8087
8088 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
8089 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
8090
8091 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8092 {
8093         int pfid = BP_FUNC(bp);
8094         int vn = BP_E1HVN(bp);
8095         int igu_sb_id;
8096         u32 val;
8097         u8 fid;
8098
8099         bp->igu_base_sb = 0xff;
8100         bp->igu_sb_cnt = 0;
8101         if (CHIP_INT_MODE_IS_BC(bp)) {
8102                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8103                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8104
8105                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8106                         FP_SB_MAX_E1x;
8107
8108                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
8109                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8110
8111                 return;
8112         }
8113
8114         /* IGU in normal mode - read CAM */
8115         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8116              igu_sb_id++) {
8117                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8118                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8119                         continue;
8120                 fid = IGU_FID(val);
8121                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
8122                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8123                                 continue;
8124                         if (IGU_VEC(val) == 0)
8125                                 /* default status block */
8126                                 bp->igu_dsb_id = igu_sb_id;
8127                         else {
8128                                 if (bp->igu_base_sb == 0xff)
8129                                         bp->igu_base_sb = igu_sb_id;
8130                                 bp->igu_sb_cnt++;
8131                         }
8132                 }
8133         }
8134         bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8135                                    NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8136         if (bp->igu_sb_cnt == 0)
8137                 BNX2X_ERR("CAM configuration error\n");
8138 }
8139
8140 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8141                                                     u32 switch_cfg)
8142 {
8143         int cfg_size = 0, idx, port = BP_PORT(bp);
8144
8145         /* Aggregation of supported attributes of all external phys */
8146         bp->port.supported[0] = 0;
8147         bp->port.supported[1] = 0;
8148         switch (bp->link_params.num_phys) {
8149         case 1:
8150                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8151                 cfg_size = 1;
8152                 break;
8153         case 2:
8154                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8155                 cfg_size = 1;
8156                 break;
8157         case 3:
8158                 if (bp->link_params.multi_phy_config &
8159                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8160                         bp->port.supported[1] =
8161                                 bp->link_params.phy[EXT_PHY1].supported;
8162                         bp->port.supported[0] =
8163                                 bp->link_params.phy[EXT_PHY2].supported;
8164                 } else {
8165                         bp->port.supported[0] =
8166                                 bp->link_params.phy[EXT_PHY1].supported;
8167                         bp->port.supported[1] =
8168                                 bp->link_params.phy[EXT_PHY2].supported;
8169                 }
8170                 cfg_size = 2;
8171                 break;
8172         }
8173
8174         if (!(bp->port.supported[0] || bp->port.supported[1])) {
8175                 BNX2X_ERR("NVRAM config error. BAD phy config. "
8176                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
8177                            SHMEM_RD(bp,
8178                            dev_info.port_hw_config[port].external_phy_config),
8179                            SHMEM_RD(bp,
8180                            dev_info.port_hw_config[port].external_phy_config2));
8181                 return;
8182         }
8183
8184         switch (switch_cfg) {
8185         case SWITCH_CFG_1G:
8186                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8187                                            port*0x10);
8188                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8189                 break;
8190
8191         case SWITCH_CFG_10G:
8192                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8193                                            port*0x18);
8194                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8195                 break;
8196
8197         default:
8198                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8199                           bp->port.link_config[0]);
8200                 return;
8201         }
8202         /* mask what we support according to speed_cap_mask per configuration */
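        /* Illustrative example: if speed_cap_mask[idx] has only the
         * D0_10G bit set, every sub-10G speed is masked out below and
         * bp->port.supported[idx] keeps just SUPPORTED_10000baseT_Full
         * (plus any non-speed attributes such as Autoneg or FIBRE).
         */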
8203         for (idx = 0; idx < cfg_size; idx++) {
8204                 if (!(bp->link_params.speed_cap_mask[idx] &
8205                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8206                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
8207
8208                 if (!(bp->link_params.speed_cap_mask[idx] &
8209                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8210                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
8211
8212                 if (!(bp->link_params.speed_cap_mask[idx] &
8213                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8214                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
8215
8216                 if (!(bp->link_params.speed_cap_mask[idx] &
8217                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8218                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
8219
8220                 if (!(bp->link_params.speed_cap_mask[idx] &
8221                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8222                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
8223                                                      SUPPORTED_1000baseT_Full);
8224
8225                 if (!(bp->link_params.speed_cap_mask[idx] &
8226                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8227                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
8228
8229                 if (!(bp->link_params.speed_cap_mask[idx] &
8230                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8231                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8232
8233         }
8234
8235         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8236                        bp->port.supported[1]);
8237 }
8238
8239 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8240 {
8241         u32 link_config, idx, cfg_size = 0;
8242         bp->port.advertising[0] = 0;
8243         bp->port.advertising[1] = 0;
8244         switch (bp->link_params.num_phys) {
8245         case 1:
8246         case 2:
8247                 cfg_size = 1;
8248                 break;
8249         case 3:
8250                 cfg_size = 2;
8251                 break;
8252         }
8253         for (idx = 0; idx < cfg_size; idx++) {
8254                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8255                 link_config = bp->port.link_config[idx];
8256                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8257                 case PORT_FEATURE_LINK_SPEED_AUTO:
8258                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8259                                 bp->link_params.req_line_speed[idx] =
8260                                         SPEED_AUTO_NEG;
8261                                 bp->port.advertising[idx] |=
8262                                         bp->port.supported[idx];
8263                         } else {
8264                                 /* force 10G, no AN */
8265                                 bp->link_params.req_line_speed[idx] =
8266                                         SPEED_10000;
8267                                 bp->port.advertising[idx] |=
8268                                         (ADVERTISED_10000baseT_Full |
8269                                          ADVERTISED_FIBRE);
8270                                 continue;
8271                         }
8272                         break;
8273
8274                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8275                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8276                                 bp->link_params.req_line_speed[idx] =
8277                                         SPEED_10;
8278                                 bp->port.advertising[idx] |=
8279                                         (ADVERTISED_10baseT_Full |
8280                                          ADVERTISED_TP);
8281                         } else {
8282                                 BNX2X_ERROR("NVRAM config error. "
8283                                             "Invalid link_config 0x%x"
8284                                             "  speed_cap_mask 0x%x\n",
8285                                             link_config,
8286                                     bp->link_params.speed_cap_mask[idx]);
8287                                 return;
8288                         }
8289                         break;
8290
8291                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8292                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8293                                 bp->link_params.req_line_speed[idx] =
8294                                         SPEED_10;
8295                                 bp->link_params.req_duplex[idx] =
8296                                         DUPLEX_HALF;
8297                                 bp->port.advertising[idx] |=
8298                                         (ADVERTISED_10baseT_Half |
8299                                          ADVERTISED_TP);
8300                         } else {
8301                                 BNX2X_ERROR("NVRAM config error. "
8302                                             "Invalid link_config 0x%x"
8303                                             "  speed_cap_mask 0x%x\n",
8304                                             link_config,
8305                                           bp->link_params.speed_cap_mask[idx]);
8306                                 return;
8307                         }
8308                         break;
8309
8310                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8311                         if (bp->port.supported[idx] &
8312                             SUPPORTED_100baseT_Full) {
8313                                 bp->link_params.req_line_speed[idx] =
8314                                         SPEED_100;
8315                                 bp->port.advertising[idx] |=
8316                                         (ADVERTISED_100baseT_Full |
8317                                          ADVERTISED_TP);
8318                         } else {
8319                                 BNX2X_ERROR("NVRAM config error. "
8320                                             "Invalid link_config 0x%x"
8321                                             "  speed_cap_mask 0x%x\n",
8322                                             link_config,
8323                                           bp->link_params.speed_cap_mask[idx]);
8324                                 return;
8325                         }
8326                         break;
8327
8328                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8329                         if (bp->port.supported[idx] &
8330                             SUPPORTED_100baseT_Half) {
8331                                 bp->link_params.req_line_speed[idx] =
8332                                                                 SPEED_100;
8333                                 bp->link_params.req_duplex[idx] =
8334                                                                 DUPLEX_HALF;
8335                                 bp->port.advertising[idx] |=
8336                                         (ADVERTISED_100baseT_Half |
8337                                          ADVERTISED_TP);
8338                         } else {
8339                                 BNX2X_ERROR("NVRAM config error. "
8340                                     "Invalid link_config 0x%x"
8341                                     "  speed_cap_mask 0x%x\n",
8342                                     link_config,
8343                                     bp->link_params.speed_cap_mask[idx]);
8344                                 return;
8345                         }
8346                         break;
8347
8348                 case PORT_FEATURE_LINK_SPEED_1G:
8349                         if (bp->port.supported[idx] &
8350                             SUPPORTED_1000baseT_Full) {
8351                                 bp->link_params.req_line_speed[idx] =
8352                                         SPEED_1000;
8353                                 bp->port.advertising[idx] |=
8354                                         (ADVERTISED_1000baseT_Full |
8355                                          ADVERTISED_TP);
8356                         } else {
8357                                 BNX2X_ERROR("NVRAM config error. "
8358                                     "Invalid link_config 0x%x"
8359                                     "  speed_cap_mask 0x%x\n",
8360                                     link_config,
8361                                     bp->link_params.speed_cap_mask[idx]);
8362                                 return;
8363                         }
8364                         break;
8365
8366                 case PORT_FEATURE_LINK_SPEED_2_5G:
8367                         if (bp->port.supported[idx] &
8368                             SUPPORTED_2500baseX_Full) {
8369                                 bp->link_params.req_line_speed[idx] =
8370                                         SPEED_2500;
8371                                 bp->port.advertising[idx] |=
8372                                         (ADVERTISED_2500baseX_Full |
8373                                                 ADVERTISED_TP);
8374                         } else {
8375                                 BNX2X_ERROR("NVRAM config error. "
8376                                     "Invalid link_config 0x%x"
8377                                     "  speed_cap_mask 0x%x\n",
8378                                     link_config,
8379                                     bp->link_params.speed_cap_mask[idx]);
8380                                 return;
8381                         }
8382                         break;
8383
8384                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8385                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8386                 case PORT_FEATURE_LINK_SPEED_10G_KR:
8387                         if (bp->port.supported[idx] &
8388                             SUPPORTED_10000baseT_Full) {
8389                                 bp->link_params.req_line_speed[idx] =
8390                                         SPEED_10000;
8391                                 bp->port.advertising[idx] |=
8392                                         (ADVERTISED_10000baseT_Full |
8393                                                 ADVERTISED_FIBRE);
8394                         } else {
8395                                 BNX2X_ERROR("NVRAM config error. "
8396                                     "Invalid link_config 0x%x"
8397                                     "  speed_cap_mask 0x%x\n",
8398                                     link_config,
8399                                     bp->link_params.speed_cap_mask[idx]);
8400                                 return;
8401                         }
8402                         break;
8403
8404                 default:
8405                         BNX2X_ERROR("NVRAM config error. "
8406                                     "BAD link speed link_config 0x%x\n",
8407                                     link_config);
8408                         bp->link_params.req_line_speed[idx] =
8409                                                 SPEED_AUTO_NEG;
8410                         bp->port.advertising[idx] =
8411                                         bp->port.supported[idx];
8412                         break;
8413                 }
8414
8415                 bp->link_params.req_flow_ctrl[idx] = (link_config &
8416                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8417                 if ((bp->link_params.req_flow_ctrl[idx] ==
8418                      BNX2X_FLOW_CTRL_AUTO) &&
8419                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8420                         bp->link_params.req_flow_ctrl[idx] =
8421                                 BNX2X_FLOW_CTRL_NONE;
8422                 }
8423
8424                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
8425                                " 0x%x advertising 0x%x\n",
8426                                bp->link_params.req_line_speed[idx],
8427                                bp->link_params.req_duplex[idx],
8428                                bp->link_params.req_flow_ctrl[idx],
8429                                bp->port.advertising[idx]);
8430         }
8431 }
8432
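/* Assemble a MAC address in on-wire (big-endian) order from the two
 * shmem words.  Worked example with illustrative values: mac_hi = 0x0010
 * and mac_lo = 0x18a1b2c3 produce the buffer 00:10:18:a1:b2:c3.
 */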
8433 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8434 {
8435         mac_hi = cpu_to_be16(mac_hi);
8436         mac_lo = cpu_to_be32(mac_lo);
8437         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8438         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8439 }
8440
8441 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8442 {
8443         int port = BP_PORT(bp);
8444         u32 config;
8445         u32 ext_phy_type, ext_phy_config;
8446
8447         bp->link_params.bp = bp;
8448         bp->link_params.port = port;
8449
8450         bp->link_params.lane_config =
8451                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8452
8453         bp->link_params.speed_cap_mask[0] =
8454                 SHMEM_RD(bp,
8455                          dev_info.port_hw_config[port].speed_capability_mask);
8456         bp->link_params.speed_cap_mask[1] =
8457                 SHMEM_RD(bp,
8458                          dev_info.port_hw_config[port].speed_capability_mask2);
8459         bp->port.link_config[0] =
8460                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8461
8462         bp->port.link_config[1] =
8463                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8464
8465         bp->link_params.multi_phy_config =
8466                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8467         /* If the device is capable of WoL, set the default state according
8468          * to the HW
8469          */
8470         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8471         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8472                    (config & PORT_FEATURE_WOL_ENABLED));
8473
8474         BNX2X_DEV_INFO("lane_config 0x%08x  "
8475                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
8476                        bp->link_params.lane_config,
8477                        bp->link_params.speed_cap_mask[0],
8478                        bp->port.link_config[0]);
8479
8480         bp->link_params.switch_cfg = (bp->port.link_config[0] &
8481                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8482         bnx2x_phy_probe(&bp->link_params);
8483         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8484
8485         bnx2x_link_settings_requested(bp);
8486
8487         /*
8488          * If connected directly, work with the internal PHY, otherwise, work
8489          * with the external PHY
8490          */
8491         ext_phy_config =
8492                 SHMEM_RD(bp,
8493                          dev_info.port_hw_config[port].external_phy_config);
8494         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8495         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8496                 bp->mdio.prtad = bp->port.phy_addr;
8497
8498         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8499                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8500                 bp->mdio.prtad =
8501                         XGXS_EXT_PHY_ADDR(ext_phy_config);
8502
8503         /*
8504          * Check if hw lock is required to access MDC/MDIO bus to the PHY(s).
8505          * In MF mode it is set to cover the self-test cases.
8506          */
8507         if (IS_MF(bp))
8508                 bp->port.need_hw_lock = 1;
8509         else
8510                 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8511                                                         bp->common.shmem_base,
8512                                                         bp->common.shmem2_base);
8513 }
8514
8515 #ifdef BCM_CNIC
8516 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8517 {
8518         u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8519                                 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8520         u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8521                                 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8522
8523         /* Get the number of maximum allowed iSCSI and FCoE connections */
8524         bp->cnic_eth_dev.max_iscsi_conn =
8525                 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8526                 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8527
8528         bp->cnic_eth_dev.max_fcoe_conn =
8529                 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8530                 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8531
8532         BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8533                        bp->cnic_eth_dev.max_iscsi_conn,
8534                        bp->cnic_eth_dev.max_fcoe_conn);
8535
8536         /* If the maximum allowed number of connections is zero,
8537          * disable the feature.
8538          */
8539         if (!bp->cnic_eth_dev.max_iscsi_conn)
8540                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8541
8542         if (!bp->cnic_eth_dev.max_fcoe_conn)
8543                 bp->flags |= NO_FCOE_FLAG;
8544 }
8545 #endif
8546
8547 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8548 {
8549         u32 val, val2;
8550         int func = BP_ABS_FUNC(bp);
8551         int port = BP_PORT(bp);
8552 #ifdef BCM_CNIC
8553         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8554         u8 *fip_mac = bp->fip_mac;
8555 #endif
8556
8557         if (BP_NOMCP(bp)) {
8558                 BNX2X_ERROR("warning: random MAC workaround active\n");
8559                 random_ether_addr(bp->dev->dev_addr);
8560         } else if (IS_MF(bp)) {
8561                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8562                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8563                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8564                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8565                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8566
8567 #ifdef BCM_CNIC
8568                 /* iSCSI and FCoE NPAR MACs: if the iSCSI or the FCoE MAC
8569                  * is missing, the corresponding feature should be disabled.
8570                  */
8571                 if (IS_MF_SI(bp)) {
8572                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8573                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8574                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
8575                                                      iscsi_mac_addr_upper);
8576                                 val = MF_CFG_RD(bp, func_ext_config[func].
8577                                                     iscsi_mac_addr_lower);
8578                                 BNX2X_DEV_INFO("Read iSCSI MAC: "
8579                                                "0x%x:0x%04x\n", val2, val);
8580                                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8581
8582                                 /* Disable iSCSI OOO if MAC configuration is
8583                                  * invalid.
8584                                  */
8585                                 if (!is_valid_ether_addr(iscsi_mac)) {
8586                                         bp->flags |= NO_ISCSI_OOO_FLAG |
8587                                                      NO_ISCSI_FLAG;
8588                                         memset(iscsi_mac, 0, ETH_ALEN);
8589                                 }
8590                         } else
8591                                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8592
8593                         if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8594                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
8595                                                      fcoe_mac_addr_upper);
8596                                 val = MF_CFG_RD(bp, func_ext_config[func].
8597                                                     fcoe_mac_addr_lower);
8598                                 BNX2X_DEV_INFO("Read FCoE MAC: "
8599                                                "0x%x:0x%04x\n", val2, val);
8600                                 bnx2x_set_mac_buf(fip_mac, val, val2);
8601
8602                                 /* Disable FCoE if MAC configuration is
8603                                  * invalid.
8604                                  */
8605                                 if (!is_valid_ether_addr(fip_mac)) {
8606                                         bp->flags |= NO_FCOE_FLAG;
8607                                         memset(bp->fip_mac, 0, ETH_ALEN);
8608                                 }
8609                         } else
8610                                 bp->flags |= NO_FCOE_FLAG;
8611                 }
8612 #endif
8613         } else {
8614                 /* in SF read MACs from port configuration */
8615                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8616                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8617                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8618
8619 #ifdef BCM_CNIC
8620                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8621                                     iscsi_mac_upper);
8622                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8623                                    iscsi_mac_lower);
8624                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8625 #endif
8626         }
8627
8628         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8629         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8630
8631 #ifdef BCM_CNIC
8632         /* Set the FCoE MAC in modes other than MF_SI */
8633         if (!CHIP_IS_E1x(bp)) {
8634                 if (IS_MF_SD(bp))
8635                         memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8636                 else if (!IS_MF(bp))
8637                         memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8638         }
8639 #endif
8640 }
8641
8642 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8643 {
8644         int func = BP_ABS_FUNC(bp); /* absolute function number */
8645         int vn, port;
8646         u32 val = 0;
8647         int rc = 0;
8648
8649         bnx2x_get_common_hwinfo(bp);
8650
8651         if (CHIP_IS_E1x(bp)) {
8652                 bp->common.int_block = INT_BLOCK_HC;
8653
8654                 bp->igu_dsb_id = DEF_SB_IGU_ID;
8655                 bp->igu_base_sb = 0;
8656                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8657                                        NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8658         } else {
8659                 bp->common.int_block = INT_BLOCK_IGU;
8660                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8661                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8662                         DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8663                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8664                 } else
8665                         DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8666
8667                 bnx2x_get_igu_cam_info(bp);
8668
8669         }
8670         DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
8671                              bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8672
8673         /*
8674          * Initialize MF configuration
8675          */
8676
8677         bp->mf_ov = 0;
8678         bp->mf_mode = 0;
8679         vn = BP_E1HVN(bp);
8680         port = BP_PORT(bp);
8681
8682         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8683                 DP(NETIF_MSG_PROBE,
8684                             "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8685                             bp->common.shmem2_base, SHMEM2_RD(bp, size),
8686                             (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8687                 if (SHMEM2_HAS(bp, mf_cfg_addr))
8688                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8689                 else
8690                         bp->common.mf_cfg_base = bp->common.shmem_base +
8691                                 offsetof(struct shmem_region, func_mb) +
8692                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8693                 /*
8694                  * get mf configuration:
8695                  * 1. existence of MF configuration
8696                  * 2. MAC address must be legal (check only upper bytes)
8697                  *    for Switch-Independent mode;
8698                  *    OVLAN must be legal for Switch-Dependent mode
8699                  * 3. SF_MODE configures specific MF mode
8700                  */
8701                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8702                         /* get mf configuration */
8703                         val = SHMEM_RD(bp,
8704                                        dev_info.shared_feature_config.config);
8705                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8706
8707                         switch (val) {
8708                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8709                                 val = MF_CFG_RD(bp, func_mf_config[func].
8710                                                 mac_upper);
8711                                 /* check for legal mac (upper bytes) */
8712                                 if (val != 0xffff) {
8713                                         bp->mf_mode = MULTI_FUNCTION_SI;
8714                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8715                                                    func_mf_config[func].config);
8716                                 } else
8717                                         DP(NETIF_MSG_PROBE, "illegal MAC "
8718                                                             "address for SI\n");
8719                                 break;
8720                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8721                                 /* get OV configuration */
8722                                 val = MF_CFG_RD(bp,
8723                                         func_mf_config[FUNC_0].e1hov_tag);
8724                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8725
8726                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8727                                         bp->mf_mode = MULTI_FUNCTION_SD;
8728                                         bp->mf_config[vn] = MF_CFG_RD(bp,
8729                                                 func_mf_config[func].config);
8730                                 } else
8731                                         DP(NETIF_MSG_PROBE, "illegal OV for "
8732                                                             "SD\n");
8733                                 break;
8734                         default:
8735                                 /* Unknown configuration: reset mf_config */
8736                                 bp->mf_config[vn] = 0;
8737                                 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8738                                    val);
8739                         }
8740                 }
8741
8742                 BNX2X_DEV_INFO("%s function mode\n",
8743                                IS_MF(bp) ? "multi" : "single");
8744
8745                 switch (bp->mf_mode) {
8746                 case MULTI_FUNCTION_SD:
8747                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8748                               FUNC_MF_CFG_E1HOV_TAG_MASK;
8749                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8750                                 bp->mf_ov = val;
8751                                 BNX2X_DEV_INFO("MF OV for func %d is %d"
8752                                                " (0x%04x)\n", func,
8753                                                bp->mf_ov, bp->mf_ov);
8754                         } else {
8755                                 BNX2X_ERR("No valid MF OV for func %d,"
8756                                           "  aborting\n", func);
8757                                 rc = -EPERM;
8758                         }
8759                         break;
8760                 case MULTI_FUNCTION_SI:
8761                         BNX2X_DEV_INFO("func %d is in MF "
8762                                        "switch-independent mode\n", func);
8763                         break;
8764                 default:
8765                         if (vn) {
8766                                 BNX2X_ERR("VN %d in single function mode,"
8767                                           "  aborting\n", vn);
8768                                 rc = -EPERM;
8769                         }
8770                         break;
8771                 }
8772
8773         }
8774
8775         /* adjust igu_sb_cnt to MF for E1x */
8776         if (CHIP_IS_E1x(bp) && IS_MF(bp))
8777                 bp->igu_sb_cnt /= E1HVN_MAX;
8778
8779         /*
8780          * adjust E2 sb count: to be removed when the FW supports
8781          * more than 16 L2 clients
8782          */
8783 #define MAX_L2_CLIENTS                          16
8784         if (CHIP_IS_E2(bp))
8785                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8786                                        MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
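        /* e.g. with MF enabled an E2 function is capped at
         * 16 / 4 = 4 IGU status blocks until the FW limit is lifted.
         */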
8787
8788         if (!BP_NOMCP(bp)) {
8789                 bnx2x_get_port_hwinfo(bp);
8790
8791                 bp->fw_seq =
8792                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8793                          DRV_MSG_SEQ_NUMBER_MASK);
8794                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8795         }
8796
8797         /* Get MAC addresses */
8798         bnx2x_get_mac_hwinfo(bp);
8799
8800 #ifdef BCM_CNIC
8801         bnx2x_get_cnic_info(bp);
8802 #endif
8803
8804         return rc;
8805 }
8806
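/* Extract an optional vendor-specific firmware-version string from the
 * PCI VPD: locate the read-only LRDT block, match the MFR_ID keyword
 * against the Dell vendor ID and, on a match, copy the V0 keyword data
 * into bp->fw_ver.  On any parse failure bp->fw_ver is left zeroed.
 */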
8807 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8808 {
8809         int cnt, i, block_end, rodi;
8810         char vpd_data[BNX2X_VPD_LEN+1];
8811         char str_id_reg[VENDOR_ID_LEN+1];
8812         char str_id_cap[VENDOR_ID_LEN+1];
8813         u8 len;
8814
8815         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8816         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8817
8818         if (cnt < BNX2X_VPD_LEN)
8819                 goto out_not_found;
8820
8821         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8822                              PCI_VPD_LRDT_RO_DATA);
8823         if (i < 0)
8824                 goto out_not_found;
8825
8826
8827         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8828                     pci_vpd_lrdt_size(&vpd_data[i]);
8829
8830         i += PCI_VPD_LRDT_TAG_SIZE;
8831
8832         if (block_end > BNX2X_VPD_LEN)
8833                 goto out_not_found;
8834
8835         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8836                                    PCI_VPD_RO_KEYWORD_MFR_ID);
8837         if (rodi < 0)
8838                 goto out_not_found;
8839
8840         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8841
8842         if (len != VENDOR_ID_LEN)
8843                 goto out_not_found;
8844
8845         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8846
8847         /* vendor specific info */
8848         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8849         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8850         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8851             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8852
8853                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8854                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
8855                 if (rodi >= 0) {
8856                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8857
8858                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8859
8860                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8861                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8862                                 bp->fw_ver[len] = ' ';
8863                         }
8864                 }
8865                 return;
8866         }
8867 out_not_found:
8868         return;
8869 }
8870
8871 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8872 {
8873         int func;
8874         int timer_interval;
8875         int rc;
8876
8877         /* Disable interrupt handling until HW is initialized */
8878         atomic_set(&bp->intr_sem, 1);
8879         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8880
8881         mutex_init(&bp->port.phy_mutex);
8882         mutex_init(&bp->fw_mb_mutex);
8883         spin_lock_init(&bp->stats_lock);
8884 #ifdef BCM_CNIC
8885         mutex_init(&bp->cnic_mutex);
8886 #endif
8887
8888         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8889         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8890
8891         rc = bnx2x_get_hwinfo(bp);
8892
8893         if (!rc)
8894                 rc = bnx2x_alloc_mem_bp(bp);
8895
8896         bnx2x_read_fwinfo(bp);
8897
8898         func = BP_FUNC(bp);
8899
8900         /* need to reset the chip if UNDI was active */
8901         if (!BP_NOMCP(bp))
8902                 bnx2x_undi_unload(bp);
8903
8904         if (CHIP_REV_IS_FPGA(bp))
8905                 dev_err(&bp->pdev->dev, "FPGA detected\n");
8906
8907         if (BP_NOMCP(bp) && (func == 0))
8908                 dev_err(&bp->pdev->dev, "MCP disabled, "
8909                                         "must load devices in order!\n");
8910
8911         bp->multi_mode = multi_mode;
8912         bp->int_mode = int_mode;
8913
8914         bp->dev->features |= NETIF_F_GRO;
8915
8916         /* Set TPA flags */
8917         if (disable_tpa) {
8918                 bp->flags &= ~TPA_ENABLE_FLAG;
8919                 bp->dev->features &= ~NETIF_F_LRO;
8920         } else {
8921                 bp->flags |= TPA_ENABLE_FLAG;
8922                 bp->dev->features |= NETIF_F_LRO;
8923         }
8924         bp->disable_tpa = disable_tpa;
8925
8926         if (CHIP_IS_E1(bp))
8927                 bp->dropless_fc = 0;
8928         else
8929                 bp->dropless_fc = dropless_fc;
8930
8931         bp->mrrs = mrrs;
8932
8933         bp->tx_ring_size = MAX_TX_AVAIL;
8934
8935         bp->rx_csum = 1;
8936
8937         /* make sure that the numbers are in the right granularity */
8938         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8939         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8940
8941         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8942         bp->current_interval = (poll ? poll : timer_interval);
8943
8944         init_timer(&bp->timer);
8945         bp->timer.expires = jiffies + bp->current_interval;
8946         bp->timer.data = (unsigned long) bp;
8947         bp->timer.function = bnx2x_timer;
8948
8949         bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8950         bnx2x_dcbx_init_params(bp);
8951
8952         return rc;
8953 }
8954
8955
8956 /****************************************************************************
8957 * General service functions
8958 ****************************************************************************/
8959
8960 /* called with rtnl_lock */
8961 static int bnx2x_open(struct net_device *dev)
8962 {
8963         struct bnx2x *bp = netdev_priv(dev);
8964
8965         netif_carrier_off(dev);
8966
8967         bnx2x_set_power_state(bp, PCI_D0);
8968
8969         if (!bnx2x_reset_is_done(bp)) {
8970                 do {
8971                         /* Reset the MCP mailbox sequence if there is an
8972                          * ongoing recovery
8973                          */
8974                         bp->fw_seq = 0;
8975
8976                         /* If this is the first function to load and "reset
8977                          * done" is still not cleared, recovery may not have
8978                          * completed. The attention state isn't checked here:
8979                          * it may already have been cleared by a "common"
8980                          * reset, but we shall do "process kill" anyway.
8981                          */
8982                         if ((bnx2x_get_load_cnt(bp) == 0) &&
8983                                 bnx2x_trylock_hw_lock(bp,
8984                                 HW_LOCK_RESOURCE_RESERVED_08) &&
8985                                 (!bnx2x_leader_reset(bp))) {
8986                                 DP(NETIF_MSG_HW, "Recovered in open\n");
8987                                 break;
8988                         }
8989
8990                         bnx2x_set_power_state(bp, PCI_D3hot);
8991
8992                         printk(KERN_ERR "%s: Recovery flow hasn't been"
8993                         " properly completed yet. Try again later. If you"
8994                         " still see this message after a few retries then"
8995                         " a power cycle is required.\n", bp->dev->name);
8996
8997                         return -EAGAIN;
8998                 } while (0);
8999         }
9000
9001         bp->recovery_state = BNX2X_RECOVERY_DONE;
9002
9003         return bnx2x_nic_load(bp, LOAD_OPEN);
9004 }
9005
9006 /* called with rtnl_lock */
9007 static int bnx2x_close(struct net_device *dev)
9008 {
9009         struct bnx2x *bp = netdev_priv(dev);
9010
9011         /* Unload the driver, release IRQs */
9012         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9013         bnx2x_set_power_state(bp, PCI_D3hot);
9014
9015         return 0;
9016 }
9017
9018 #define E1_MAX_UC_LIST  29
9019 #define E1H_MAX_UC_LIST 30
9020 #define E2_MAX_UC_LIST  14
9021 static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
9022 {
9023         if (CHIP_IS_E1(bp))
9024                 return E1_MAX_UC_LIST;
9025         else if (CHIP_IS_E1H(bp))
9026                 return E1H_MAX_UC_LIST;
9027         else
9028                 return E2_MAX_UC_LIST;
9029 }
9030
9031
9032 static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9033 {
9034         if (CHIP_IS_E1(bp))
9035                 /* CAM Entries for Port0:
9036                  *      0 - prim ETH MAC
9037                  *      1 - BCAST MAC
9038                  *      2 - iSCSI L2 ring ETH MAC
9039                  *      3-31 - UC MACs
9040                  *
9041                  * Port1 entries are allocated the same way starting from
9042                  * entry 32.
9043                  */
9044                 return 3 + 32 * BP_PORT(bp);
9045         else if (CHIP_IS_E1H(bp)) {
9046                 /* CAM Entries:
9047                  *      0-7  - prim ETH MAC for each function
9048                  *      8-15 - iSCSI L2 ring ETH MAC for each function
9049                  *      16-255 - UC MAC lists for each function
9050                  *
9051                  * Remark: There is no FCoE support for E1H, thus FCoE related
9052                  *         MACs are not considered.
9053                  */
9054                 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9055                         bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9056         } else {
9057                 /* CAM Entries (there is a separate CAM per engine):
9058                  *      0-3  - prim ETH MAC for each function
9059                  *      4-7  - iSCSI L2 ring ETH MAC for each function
9060                  *      8-11 - FIP ucast L2 MAC for each function
9061                  *      12-15 - ALL_ENODE_MACS mcast MAC for each function
9062                  *      16-71 - UC MAC lists for each function
9063                  */
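                /* Worked example, following the layout above: with the
                 * E2 max UC list size of 14, func_idx 1 is given CAM
                 * lines 16 + 14*1 = 30 .. 43 for its UC MACs.
                 */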
9064                 u8 func_idx =
9065                         (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9066
9067                 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9068                         bnx2x_max_uc_list(bp) * func_idx;
9069         }
9070 }
9071
9072 /* Set the UC list; do not wait, as waiting implies sleeping and
9073  * set_rx_mode can be invoked from a non-sleepable context.
9074  *
9075  * Instead we use the same ramrod data buffer each time we need
9076  * to configure a list of addresses, and use the fact that the
9077  * list of MACs is changed in an incremental way and that the
9078  * function is called under the netif_addr_lock. A temporary
9079  * inconsistent CAM configuration (possible in case of very fast
9080  * sequence of add/del/add on the host side) will shortly be
9081  * restored by the handler of the last ramrod.
9082  */
9083 static int bnx2x_set_uc_list(struct bnx2x *bp)
9084 {
9085         int i = 0, old;
9086         struct net_device *dev = bp->dev;
9087         u8 offset = bnx2x_uc_list_cam_offset(bp);
9088         struct netdev_hw_addr *ha;
9089         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9090         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9091
9092         if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9093                 return -EINVAL;
9094
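        /* Each config_table entry carries the MAC as three 16-bit words
         * with the bytes in network order within each word; on a
         * little-endian host swab16() of the raw u16 load yields that.
         * Illustrative example: 00:10:18:a1:b2:c3 is written as
         * msb 0x0010, middle 0x18a1, lsb 0xb2c3.
         */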
9095         netdev_for_each_uc_addr(ha, dev) {
9096                 /* copy mac */
9097                 config_cmd->config_table[i].msb_mac_addr =
9098                         swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9099                 config_cmd->config_table[i].middle_mac_addr =
9100                         swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9101                 config_cmd->config_table[i].lsb_mac_addr =
9102                         swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9103
9104                 config_cmd->config_table[i].vlan_id = 0;
9105                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9106                 config_cmd->config_table[i].clients_bit_vector =
9107                         cpu_to_le32(1 << BP_L_ID(bp));
9108
9109                 SET_FLAG(config_cmd->config_table[i].flags,
9110                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9111                         T_ETH_MAC_COMMAND_SET);
9112
9113                 DP(NETIF_MSG_IFUP,
9114                    "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9115                    config_cmd->config_table[i].msb_mac_addr,
9116                    config_cmd->config_table[i].middle_mac_addr,
9117                    config_cmd->config_table[i].lsb_mac_addr);
9118
9119                 i++;
9120
9121                 /* Set uc MAC in NIG */
9122                 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9123                                      LLH_CAM_ETH_LINE + i);
9124         }
9125         old = config_cmd->hdr.length;
9126         if (old > i) {
9127                 for (; i < old; i++) {
9128                         if (CAM_IS_INVALID(config_cmd->
9129                                            config_table[i])) {
9130                                 /* already invalidated */
9131                                 break;
9132                         }
9133                         /* invalidate */
9134                         SET_FLAG(config_cmd->config_table[i].flags,
9135                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9136                                 T_ETH_MAC_COMMAND_INVALIDATE);
9137                 }
9138         }
9139
9140         wmb();
9141
9142         config_cmd->hdr.length = i;
9143         config_cmd->hdr.offset = offset;
9144         config_cmd->hdr.client_id = 0xff;
9145         /* Mark that this ramrod doesn't use bp->set_mac_pending for
9146          * synchronization.
9147          */
9148         config_cmd->hdr.echo = 0;
9149
9150         mb();
9151
9152         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9153                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9154
9155 }
9156
9157 void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9158 {
9159         int i;
9160         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9161         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9162         int ramrod_flags = WAIT_RAMROD_COMMON;
9163         u8 offset = bnx2x_uc_list_cam_offset(bp);
9164         u8 max_list_size = bnx2x_max_uc_list(bp);
9165
9166         for (i = 0; i < max_list_size; i++) {
9167                 SET_FLAG(config_cmd->config_table[i].flags,
9168                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9169                         T_ETH_MAC_COMMAND_INVALIDATE);
9170                 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9171         }
9172
9173         wmb();
9174
9175         config_cmd->hdr.length = max_list_size;
9176         config_cmd->hdr.offset = offset;
9177         config_cmd->hdr.client_id = 0xff;
9178         /* We'll wait for a completion this time... */
9179         config_cmd->hdr.echo = 1;
9180
9181         bp->set_mac_pending = 1;
9182
9183         mb();
9184
9185         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9186                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9187
9188         /* Wait for a completion */
9189         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9190                                 ramrod_flags);
9191
9192 }
9193
9194 static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9195 {
9196         /* some multicasts */
9197         if (CHIP_IS_E1(bp)) {
9198                 return bnx2x_set_e1_mc_list(bp);
9199         } else { /* E1H and newer */
9200                 return bnx2x_set_e1h_mc_list(bp);
9201         }
9202 }
9203
9204 /* called with netif_tx_lock from dev_mcast.c */
9205 void bnx2x_set_rx_mode(struct net_device *dev)
9206 {
9207         struct bnx2x *bp = netdev_priv(dev);
9208         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9209
9210         if (bp->state != BNX2X_STATE_OPEN) {
9211                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9212                 return;
9213         }
9214
9215         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9216
9217         if (dev->flags & IFF_PROMISC)
9218                 rx_mode = BNX2X_RX_MODE_PROMISC;
9219         else if (dev->flags & IFF_ALLMULTI)
9220                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9221         else {
9222                 /* some multicasts */
9223                 if (bnx2x_set_mc_list(bp))
9224                         rx_mode = BNX2X_RX_MODE_ALLMULTI;
9225
9226                 /* some unicasts */
9227                 if (bnx2x_set_uc_list(bp))
9228                         rx_mode = BNX2X_RX_MODE_PROMISC;
9229         }
9230
9231         bp->rx_mode = rx_mode;
9232         bnx2x_set_storm_rx_mode(bp);
9233 }
9234
9235 /* called with rtnl_lock */
9236 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
9237                            int devad, u16 addr)
9238 {
9239         struct bnx2x *bp = netdev_priv(netdev);
9240         u16 value;
9241         int rc;
9242
9243         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
9244            prtad, devad, addr);
9245
9246         /* The HW expects different devad if CL22 is used */
9247         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
9248
9249         bnx2x_acquire_phy_lock(bp);
9250         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
9251         bnx2x_release_phy_lock(bp);
9252         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
9253
9254         if (!rc)
9255                 rc = value;
9256         return rc;
9257 }
9258
9259 /* called with rtnl_lock */
9260 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
9261                             u16 addr, u16 value)
9262 {
9263         struct bnx2x *bp = netdev_priv(netdev);
9264         int rc;
9265
9266         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
9267                            " value 0x%x\n", prtad, devad, addr, value);
9268
9269         /* The HW expects different devad if CL22 is used */
9270         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
9271
9272         bnx2x_acquire_phy_lock(bp);
9273         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
9274         bnx2x_release_phy_lock(bp);
9275         return rc;
9276 }
9277
9278 /* called with rtnl_lock */
9279 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9280 {
9281         struct bnx2x *bp = netdev_priv(dev);
9282         struct mii_ioctl_data *mdio = if_mii(ifr);
9283
9284         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9285            mdio->phy_id, mdio->reg_num, mdio->val_in);
9286
9287         if (!netif_running(dev))
9288                 return -EAGAIN;
9289
9290         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
9291 }
9292
9293 #ifdef CONFIG_NET_POLL_CONTROLLER
9294 static void poll_bnx2x(struct net_device *dev)
9295 {
9296         struct bnx2x *bp = netdev_priv(dev);
9297
9298         disable_irq(bp->pdev->irq);
9299         bnx2x_interrupt(bp->pdev->irq, dev);
9300         enable_irq(bp->pdev->irq);
9301 }
9302 #endif
9303
9304 static const struct net_device_ops bnx2x_netdev_ops = {
9305         .ndo_open               = bnx2x_open,
9306         .ndo_stop               = bnx2x_close,
9307         .ndo_start_xmit         = bnx2x_start_xmit,
9308         .ndo_select_queue       = bnx2x_select_queue,
9309         .ndo_set_rx_mode        = bnx2x_set_rx_mode,
9310         .ndo_set_mac_address    = bnx2x_change_mac_addr,
9311         .ndo_validate_addr      = eth_validate_addr,
9312         .ndo_do_ioctl           = bnx2x_ioctl,
9313         .ndo_change_mtu         = bnx2x_change_mtu,
9314         .ndo_tx_timeout         = bnx2x_tx_timeout,
9315 #ifdef CONFIG_NET_POLL_CONTROLLER
9316         .ndo_poll_controller    = poll_bnx2x,
9317 #endif
9318 };
9319
9320 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9321                                     struct net_device *dev)
9322 {
9323         struct bnx2x *bp;
9324         int rc;
9325
9326         SET_NETDEV_DEV(dev, &pdev->dev);
9327         bp = netdev_priv(dev);
9328
9329         bp->dev = dev;
9330         bp->pdev = pdev;
9331         bp->flags = 0;
9332         bp->pf_num = PCI_FUNC(pdev->devfn);
9333
9334         rc = pci_enable_device(pdev);
9335         if (rc) {
9336                 dev_err(&bp->pdev->dev,
9337                         "Cannot enable PCI device, aborting\n");
9338                 goto err_out;
9339         }
9340
9341         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9342                 dev_err(&bp->pdev->dev,
9343                         "Cannot find PCI device base address, aborting\n");
9344                 rc = -ENODEV;
9345                 goto err_out_disable;
9346         }
9347
9348         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9349                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9350                        " base address, aborting\n");
9351                 rc = -ENODEV;
9352                 goto err_out_disable;
9353         }
9354
9355         if (atomic_read(&pdev->enable_cnt) == 1) {
9356                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9357                 if (rc) {
9358                         dev_err(&bp->pdev->dev,
9359                                 "Cannot obtain PCI resources, aborting\n");
9360                         goto err_out_disable;
9361                 }
9362
9363                 pci_set_master(pdev);
9364                 pci_save_state(pdev);
9365         }
9366
9367         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9368         if (bp->pm_cap == 0) {
9369                 dev_err(&bp->pdev->dev,
9370                         "Cannot find power management capability, aborting\n");
9371                 rc = -EIO;
9372                 goto err_out_release;
9373         }
9374
9375         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9376         if (bp->pcie_cap == 0) {
9377                 dev_err(&bp->pdev->dev,
9378                         "Cannot find PCI Express capability, aborting\n");
9379                 rc = -EIO;
9380                 goto err_out_release;
9381         }
9382
9383         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
9384                 bp->flags |= USING_DAC_FLAG;
9385                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
9386                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9387                                " failed, aborting\n");
9388                         rc = -EIO;
9389                         goto err_out_release;
9390                 }
9391
9392         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9393                 dev_err(&bp->pdev->dev,
9394                         "System does not support DMA, aborting\n");
9395                 rc = -EIO;
9396                 goto err_out_release;
9397         }
9398
9399         dev->mem_start = pci_resource_start(pdev, 0);
9400         dev->base_addr = dev->mem_start;
9401         dev->mem_end = pci_resource_end(pdev, 0);
9402
9403         dev->irq = pdev->irq;
9404
9405         bp->regview = pci_ioremap_bar(pdev, 0);
9406         if (!bp->regview) {
9407                 dev_err(&bp->pdev->dev,
9408                         "Cannot map register space, aborting\n");
9409                 rc = -ENOMEM;
9410                 goto err_out_release;
9411         }
9412
9413         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9414                                         min_t(u64, BNX2X_DB_SIZE(bp),
9415                                               pci_resource_len(pdev, 2)));
9416         if (!bp->doorbells) {
9417                 dev_err(&bp->pdev->dev,
9418                         "Cannot map doorbell space, aborting\n");
9419                 rc = -ENOMEM;
9420                 goto err_out_unmap;
9421         }
9422
9423         bnx2x_set_power_state(bp, PCI_D0);
9424
9425         /* clean indirect addresses */
9426         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9427                                PCICFG_VENDOR_ID_OFFSET);
9428         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9429         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9430         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9431         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
9432
9433         /* Reset the load counter */
9434         bnx2x_clear_load_cnt(bp);
9435
9436         dev->watchdog_timeo = TX_TIMEOUT;
9437
9438         dev->netdev_ops = &bnx2x_netdev_ops;
9439         bnx2x_set_ethtool_ops(dev);
9440         dev->features |= NETIF_F_SG;
9441         dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9442         if (bp->flags & USING_DAC_FLAG)
9443                 dev->features |= NETIF_F_HIGHDMA;
9444         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9445         dev->features |= NETIF_F_TSO6;
9446         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9447
9448         dev->vlan_features |= NETIF_F_SG;
9449         dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9450         if (bp->flags & USING_DAC_FLAG)
9451                 dev->vlan_features |= NETIF_F_HIGHDMA;
9452         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9453         dev->vlan_features |= NETIF_F_TSO6;
9454
9455 #ifdef BCM_DCBNL
9456         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9457 #endif
9458
9459         /* get_port_hwinfo() will set prtad and mmds properly */
9460         bp->mdio.prtad = MDIO_PRTAD_NONE;
9461         bp->mdio.mmds = 0;
9462         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9463         bp->mdio.dev = dev;
9464         bp->mdio.mdio_read = bnx2x_mdio_read;
9465         bp->mdio.mdio_write = bnx2x_mdio_write;
9466
9467         return 0;
9468
9469 err_out_unmap:
9470         if (bp->regview) {
9471                 iounmap(bp->regview);
9472                 bp->regview = NULL;
9473         }
9474         if (bp->doorbells) {
9475                 iounmap(bp->doorbells);
9476                 bp->doorbells = NULL;
9477         }
9478
9479 err_out_release:
9480         if (atomic_read(&pdev->enable_cnt) == 1)
9481                 pci_release_regions(pdev);
9482
9483 err_out_disable:
9484         pci_disable_device(pdev);
9485         pci_set_drvdata(pdev, NULL);
9486
9487 err_out:
9488         return rc;
9489 }
9490
9491 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9492                                                  int *width, int *speed)
9493 {
9494         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9495
9496         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9497
9498         /* return value of 1 = 2.5GHz, 2 = 5GHz */
9499         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9500 }
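/*
 * Usage sketch (illustrative, mirroring the call in bnx2x_init_one() below):
 *
 *	int width, speed;
 *	bnx2x_get_pcie_width_speed(bp, &width, &speed);
 *
 * width then holds the negotiated lane count (e.g. 4 for a x4 link) and
 * speed holds 1 for a 2.5GHz (Gen1) or 2 for a 5GHz (Gen2) link.
 */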
9501
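/*
 * The firmware file starts with a bnx2x_fw_file_hdr, which
 * bnx2x_check_firmware() below treats as an array of bnx2x_fw_file_section
 * entries, each holding a big-endian offset and length describing one blob
 * within the file.  Every section is bounds-checked against the actual
 * file size before anything is parsed.
 */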
9502 static int bnx2x_check_firmware(struct bnx2x *bp)
9503 {
9504         const struct firmware *firmware = bp->firmware;
9505         struct bnx2x_fw_file_hdr *fw_hdr;
9506         struct bnx2x_fw_file_section *sections;
9507         u32 offset, len, num_ops;
9508         u16 *ops_offsets;
9509         int i;
9510         const u8 *fw_ver;
9511
9512         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9513                 return -EINVAL;
9514
9515         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9516         sections = (struct bnx2x_fw_file_section *)fw_hdr;
9517
9518         /* Make sure none of the offsets and sizes make us read beyond
9519          * the end of the firmware data */
9520         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9521                 offset = be32_to_cpu(sections[i].offset);
9522                 len = be32_to_cpu(sections[i].len);
9523                 if (offset + len > firmware->size) {
9524                         dev_err(&bp->pdev->dev,
9525                                 "Section %d length is out of bounds\n", i);
9526                         return -EINVAL;
9527                 }
9528         }
9529
9530         /* Likewise for the init_ops offsets */
9531         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9532         ops_offsets = (u16 *)(firmware->data + offset);
9533         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9534
9535         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9536                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
9537                         dev_err(&bp->pdev->dev,
9538                                 "Section offset %d is out of bounds\n", i);
9539                         return -EINVAL;
9540                 }
9541         }
9542
9543         /* Check FW version */
9544         offset = be32_to_cpu(fw_hdr->fw_version.offset);
9545         fw_ver = firmware->data + offset;
9546         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9547             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9548             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9549             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
9550                 dev_err(&bp->pdev->dev,
9551                         "Bad FW version: %d.%d.%d.%d. Should be %d.%d.%d.%d\n",
9552                        fw_ver[0], fw_ver[1], fw_ver[2],
9553                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9554                        BCM_5710_FW_MINOR_VERSION,
9555                        BCM_5710_FW_REVISION_VERSION,
9556                        BCM_5710_FW_ENGINEERING_VERSION);
9557                 return -EINVAL;
9558         }
9559
9560         return 0;
9561 }
9562
9563 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9564 {
9565         const __be32 *source = (const __be32 *)_source;
9566         u32 *target = (u32 *)_target;
9567         u32 i;
9568
9569         for (i = 0; i < n/4; i++)
9570                 target[i] = be32_to_cpu(source[i]);
9571 }
9572
9573 /*
9574  * Ops array is stored in the following format:
9575  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9576  */
9577 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
9578 {
9579         const __be32 *source = (const __be32 *)_source;
9580         struct raw_op *target = (struct raw_op *)_target;
9581         u32 i, j, tmp;
9582
9583         for (i = 0, j = 0; i < n/8; i++, j += 2) {
9584                 tmp = be32_to_cpu(source[j]);
9585                 target[i].op = (tmp >> 24) & 0xff;
9586                 target[i].offset = tmp & 0xffffff;
9587                 target[i].raw_data = be32_to_cpu(source[j + 1]);
9588         }
9589 }
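/*
 * Worked example: the 8 raw bytes 01 00 00 10 00 00 00 2a decode (both
 * dwords read big-endian) as tmp = 0x01000010, so op = 0x01,
 * offset = 0x000010 and raw_data = 0x0000002a.
 */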
9590
9591 /*
9592  * IRO array is stored in the following format:
9593  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
9594  */
9595 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9596 {
9597         const __be32 *source = (const __be32 *)_source;
9598         struct iro *target = (struct iro *)_target;
9599         u32 i, j, tmp;
9600
9601         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9602                 target[i].base = be32_to_cpu(source[j]);
9603                 j++;
9604                 tmp = be32_to_cpu(source[j]);
9605                 target[i].m1 = (tmp >> 16) & 0xffff;
9606                 target[i].m2 = tmp & 0xffff;
9607                 j++;
9608                 tmp = be32_to_cpu(source[j]);
9609                 target[i].m3 = (tmp >> 16) & 0xffff;
9610                 target[i].size = tmp & 0xffff;
9611                 j++;
9612         }
9613 }
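/*
 * Each IRO entry consumes three big-endian dwords.  Worked example: the
 * dwords 0x00001000, 0x00020003, 0x00040005 decode to base = 0x1000,
 * m1 = 0x0002, m2 = 0x0003, m3 = 0x0004 and size = 0x0005.
 */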
9614
9615 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
9616 {
9617         const __be16 *source = (const __be16 *)_source;
9618         u16 *target = (u16 *)_target;
9619         u32 i;
9620
9621         for (i = 0; i < n/2; i++)
9622                 target[i] = be16_to_cpu(source[i]);
9623 }
9624
9625 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
9626 do {                                                                    \
9627         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
9628         bp->arr = kmalloc(len, GFP_KERNEL);                             \
9629         if (!bp->arr) {                                                 \
9630                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9631                 goto lbl;                                               \
9632         }                                                               \
9633         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
9634              (u8 *)bp->arr, len);                                       \
9635 } while (0)
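/*
 * Example expansion (a sketch): BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) allocates bp->init_data with the
 * length recorded in fw_hdr->init_data.len, then byte-swaps the blob at
 * fw_hdr->init_data.offset into it, jumping to request_firmware_exit on
 * allocation failure.
 */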
9636
9637 int bnx2x_init_firmware(struct bnx2x *bp)
9638 {
9639         const char *fw_file_name;
9640         struct bnx2x_fw_file_hdr *fw_hdr;
9641         int rc;
9642
9643         if (CHIP_IS_E1(bp))
9644                 fw_file_name = FW_FILE_NAME_E1;
9645         else if (CHIP_IS_E1H(bp))
9646                 fw_file_name = FW_FILE_NAME_E1H;
9647         else if (CHIP_IS_E2(bp))
9648                 fw_file_name = FW_FILE_NAME_E2;
9649         else {
9650                 BNX2X_ERR("Unsupported chip revision\n");
9651                 return -EINVAL;
9652         }
9653
9654         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
9655
9656         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
9657         if (rc) {
9658                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
9659                 goto request_firmware_exit;
9660         }
9661
9662         rc = bnx2x_check_firmware(bp);
9663         if (rc) {
9664                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
9665                 goto request_firmware_exit;
9666         }
9667
9668         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9669
9670         /* Initialize the pointers to the init arrays */
9671         /* Blob */
9672         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9673
9674         /* Opcodes */
9675         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9676
9677         /* Offsets */
9678         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9679                             be16_to_cpu_n);
9680
9681         /* STORMs firmware */
9682         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9683                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9684         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
9685                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9686         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9687                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9688         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
9689                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
9690         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9691                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9692         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
9693                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9694         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9695                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9696         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
9697                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
9698         /* IRO */
9699         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
9700
9701         return 0;
9702
9703 iro_alloc_err:
9704         kfree(bp->init_ops_offsets);
9705 init_offsets_alloc_err:
9706         kfree(bp->init_ops);
9707 init_ops_alloc_err:
9708         kfree(bp->init_data);
9709 request_firmware_exit:
9710         release_firmware(bp->firmware);
9711
9712         return rc;
9713 }
9714
9715 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9716 {
9717         int cid_count = L2_FP_COUNT(l2_cid_count);
9718
9719 #ifdef BCM_CNIC
9720         cid_count += CNIC_CID_MAX;
9721 #endif
9722         return roundup(cid_count, QM_CID_ROUND);
9723 }
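/*
 * Worked example (values purely illustrative): if L2_FP_COUNT() and
 * CNIC_CID_MAX sum to 272 and QM_CID_ROUND is 1024, then
 * roundup(272, 1024) yields 1024 QM CIDs.
 */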
9724
9725 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9726                                     const struct pci_device_id *ent)
9727 {
9728         struct net_device *dev = NULL;
9729         struct bnx2x *bp;
9730         int pcie_width, pcie_speed;
9731         int rc, cid_count;
9732
9733         switch (ent->driver_data) {
9734         case BCM57710:
9735         case BCM57711:
9736         case BCM57711E:
9737                 cid_count = FP_SB_MAX_E1x;
9738                 break;
9739
9740         case BCM57712:
9741         case BCM57712E:
9742                 cid_count = FP_SB_MAX_E2;
9743                 break;
9744
9745         default:
9746                 pr_err("Unknown board_type (%ld), aborting\n",
9747                            ent->driver_data);
9748                 return -ENODEV;
9749         }
9750
9751         cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
9752
9753         /* dev zeroed in alloc_etherdev_mq() */
9754         dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9755         if (!dev) {
9756                 dev_err(&pdev->dev, "Cannot allocate net device\n");
9757                 return -ENOMEM;
9758         }
9759
9760         bp = netdev_priv(dev);
9761         bp->msg_enable = debug;
9762
9763         pci_set_drvdata(pdev, dev);
9764
9765         bp->l2_cid_count = cid_count;
9766
9767         rc = bnx2x_init_dev(pdev, dev);
9768         if (rc < 0) {
9769                 free_netdev(dev);
9770                 return rc;
9771         }
9772
9773         rc = bnx2x_init_bp(bp);
9774         if (rc)
9775                 goto init_one_exit;
9776
9777         /* calc qm_cid_count */
9778         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9779
9780 #ifdef BCM_CNIC
9781         /* disable FCoE L2 queue for E1x */
9782         if (CHIP_IS_E1x(bp))
9783                 bp->flags |= NO_FCOE_FLAG;
9784
9785 #endif
9786
9787         /* Configure interrupt mode: try to enable MSI-X/MSI if
9788          * needed, set bp->num_queues appropriately.
9789          */
9790         bnx2x_set_int_mode(bp);
9791
9792         /* Add all NAPI objects */
9793         bnx2x_add_all_napi(bp);
9794
9795         rc = register_netdev(dev);
9796         if (rc) {
9797                 dev_err(&pdev->dev, "Cannot register net device\n");
9798                 goto init_one_exit;
9799         }
9800
9801 #ifdef BCM_CNIC
9802         if (!NO_FCOE(bp)) {
9803                 /* Add storage MAC address */
9804                 rtnl_lock();
9805                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9806                 rtnl_unlock();
9807         }
9808 #endif
9809
9810         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9811
9812         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9813                " IRQ %d, ", board_info[ent->driver_data].name,
9814                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9815                pcie_width,
9816                ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9817                  (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9818                                                 "5GHz (Gen2)" : "2.5GHz",
9819                dev->base_addr, bp->pdev->irq);
9820         pr_cont("node addr %pM\n", dev->dev_addr);
9821
9822         return 0;
9823
9824 init_one_exit:
9825         if (bp->regview)
9826                 iounmap(bp->regview);
9827
9828         if (bp->doorbells)
9829                 iounmap(bp->doorbells);
9830
9831         free_netdev(dev);
9832
9833         if (atomic_read(&pdev->enable_cnt) == 1)
9834                 pci_release_regions(pdev);
9835
9836         pci_disable_device(pdev);
9837         pci_set_drvdata(pdev, NULL);
9838
9839         return rc;
9840 }
9841
9842 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9843 {
9844         struct net_device *dev = pci_get_drvdata(pdev);
9845         struct bnx2x *bp;
9846
9847         if (!dev) {
9848                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9849                 return;
9850         }
9851         bp = netdev_priv(dev);
9852
9853 #ifdef BCM_CNIC
9854         /* Delete storage MAC address */
9855         if (!NO_FCOE(bp)) {
9856                 rtnl_lock();
9857                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9858                 rtnl_unlock();
9859         }
9860 #endif
9861
9862 #ifdef BCM_DCBNL
9863         /* Delete app tlvs from dcbnl */
9864         bnx2x_dcbnl_update_applist(bp, true);
9865 #endif
9866
9867         unregister_netdev(dev);
9868
9869         /* Delete all NAPI objects */
9870         bnx2x_del_all_napi(bp);
9871
9872         /* Power on: we can't let PCI layer write to us while we are in D3 */
9873         bnx2x_set_power_state(bp, PCI_D0);
9874
9875         /* Disable MSI/MSI-X */
9876         bnx2x_disable_msi(bp);
9877
9878         /* Power off */
9879         bnx2x_set_power_state(bp, PCI_D3hot);
9880
9881         /* Make sure RESET task is not scheduled before continuing */
9882         cancel_delayed_work_sync(&bp->reset_task);
9883
9884         if (bp->regview)
9885                 iounmap(bp->regview);
9886
9887         if (bp->doorbells)
9888                 iounmap(bp->doorbells);
9889
9890         bnx2x_free_mem_bp(bp);
9891
9892         free_netdev(dev);
9893
9894         if (atomic_read(&pdev->enable_cnt) == 1)
9895                 pci_release_regions(pdev);
9896
9897         pci_disable_device(pdev);
9898         pci_set_drvdata(pdev, NULL);
9899 }
9900
9901 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9902 {
9903         int i;
9904
9905         bp->state = BNX2X_STATE_ERROR;
9906
9907         bp->rx_mode = BNX2X_RX_MODE_NONE;
9908
9909         bnx2x_netif_stop(bp, 0);
9910         netif_carrier_off(bp->dev);
9911
9912         del_timer_sync(&bp->timer);
9913         bp->stats_state = STATS_STATE_DISABLED;
9914         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9915
9916         /* Release IRQs */
9917         bnx2x_free_irq(bp);
9918
9919         /* Free SKBs, SGEs, TPA pool and driver internals */
9920         bnx2x_free_skbs(bp);
9921
9922         for_each_rx_queue(bp, i)
9923                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9924
9925         bnx2x_free_mem(bp);
9926
9927         bp->state = BNX2X_STATE_CLOSED;
9928
9929         return 0;
9930 }
9931
9932 static void bnx2x_eeh_recover(struct bnx2x *bp)
9933 {
9934         u32 val;
9935
9936         mutex_init(&bp->port.phy_mutex);
9937
9938         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9939         bp->link_params.shmem_base = bp->common.shmem_base;
9940         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9941
9942         if (!bp->common.shmem_base ||
9943             (bp->common.shmem_base < 0xA0000) ||
9944             (bp->common.shmem_base >= 0xC0000)) {
9945                 BNX2X_DEV_INFO("MCP not active\n");
9946                 bp->flags |= NO_MCP_FLAG;
9947                 return;
9948         }
9949
9950         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9951         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9952                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9953                 BNX2X_ERR("BAD MCP validity signature\n");
9954
9955         if (!BP_NOMCP(bp)) {
9956                 bp->fw_seq =
9957                     (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9958                     DRV_MSG_SEQ_NUMBER_MASK);
9959                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9960         }
9961 }
9962
9963 /**
9964  * bnx2x_io_error_detected - called when PCI error is detected
9965  * @pdev: Pointer to PCI device
9966  * @state: The current pci connection state
9967  *
9968  * This function is called after a PCI bus error affecting
9969  * this device has been detected.
9970  */
9971 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9972                                                 pci_channel_state_t state)
9973 {
9974         struct net_device *dev = pci_get_drvdata(pdev);
9975         struct bnx2x *bp = netdev_priv(dev);
9976
9977         rtnl_lock();
9978
9979         netif_device_detach(dev);
9980
9981         if (state == pci_channel_io_perm_failure) {
9982                 rtnl_unlock();
9983                 return PCI_ERS_RESULT_DISCONNECT;
9984         }
9985
9986         if (netif_running(dev))
9987                 bnx2x_eeh_nic_unload(bp);
9988
9989         pci_disable_device(pdev);
9990
9991         rtnl_unlock();
9992
9993         /* Request a slot reset */
9994         return PCI_ERS_RESULT_NEED_RESET;
9995 }
9996
9997 /**
9998  * bnx2x_io_slot_reset - called after the PCI bus has been reset
9999  * @pdev: Pointer to PCI device
10000  *
10001  * Restart the card from scratch, as if from a cold-boot.
10002  */
10003 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10004 {
10005         struct net_device *dev = pci_get_drvdata(pdev);
10006         struct bnx2x *bp = netdev_priv(dev);
10007
10008         rtnl_lock();
10009
10010         if (pci_enable_device(pdev)) {
10011                 dev_err(&pdev->dev,
10012                         "Cannot re-enable PCI device after reset\n");
10013                 rtnl_unlock();
10014                 return PCI_ERS_RESULT_DISCONNECT;
10015         }
10016
10017         pci_set_master(pdev);
10018         pci_restore_state(pdev);
10019
10020         if (netif_running(dev))
10021                 bnx2x_set_power_state(bp, PCI_D0);
10022
10023         rtnl_unlock();
10024
10025         return PCI_ERS_RESULT_RECOVERED;
10026 }
10027
10028 /**
10029  * bnx2x_io_resume - called when traffic can start flowing again
10030  * @pdev: Pointer to PCI device
10031  *
10032  * This callback is called when the error recovery driver tells us that
10033  * it is OK to resume normal operation.
10034  */
10035 static void bnx2x_io_resume(struct pci_dev *pdev)
10036 {
10037         struct net_device *dev = pci_get_drvdata(pdev);
10038         struct bnx2x *bp = netdev_priv(dev);
10039
10040         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10041                 printk(KERN_ERR "Handling parity error recovery. "
10042                                 "Try again later\n");
10043                 return;
10044         }
10045
10046         rtnl_lock();
10047
10048         bnx2x_eeh_recover(bp);
10049
10050         if (netif_running(dev))
10051                 bnx2x_nic_load(bp, LOAD_NORMAL);
10052
10053         netif_device_attach(dev);
10054
10055         rtnl_unlock();
10056 }
10057
10058 static struct pci_error_handlers bnx2x_err_handler = {
10059         .error_detected = bnx2x_io_error_detected,
10060         .slot_reset     = bnx2x_io_slot_reset,
10061         .resume         = bnx2x_io_resume,
10062 };
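/*
 * Recovery flow (driven by the PCI EEH/AER core, not by the driver):
 * bnx2x_io_error_detected() detaches the netdev and asks for a slot reset,
 * bnx2x_io_slot_reset() re-enables the device after the bus reset, and
 * bnx2x_io_resume() reloads the NIC once traffic may flow again.
 */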
10063
10064 static struct pci_driver bnx2x_pci_driver = {
10065         .name        = DRV_MODULE_NAME,
10066         .id_table    = bnx2x_pci_tbl,
10067         .probe       = bnx2x_init_one,
10068         .remove      = __devexit_p(bnx2x_remove_one),
10069         .suspend     = bnx2x_suspend,
10070         .resume      = bnx2x_resume,
10071         .err_handler = &bnx2x_err_handler,
10072 };
10073
10074 static int __init bnx2x_init(void)
10075 {
10076         int ret;
10077
10078         pr_info("%s", version);
10079
10080         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10081         if (bnx2x_wq == NULL) {
10082                 pr_err("Cannot create workqueue\n");
10083                 return -ENOMEM;
10084         }
10085
10086         ret = pci_register_driver(&bnx2x_pci_driver);
10087         if (ret) {
10088                 pr_err("Cannot register driver\n");
10089                 destroy_workqueue(bnx2x_wq);
10090         }
10091         return ret;
10092 }
10093
10094 static void __exit bnx2x_cleanup(void)
10095 {
10096         pci_unregister_driver(&bnx2x_pci_driver);
10097
10098         destroy_workqueue(bnx2x_wq);
10099 }
10100
10101 module_init(bnx2x_init);
10102 module_exit(bnx2x_cleanup);
10103
10104 #ifdef BCM_CNIC
10105
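/*
 * CNIC slowpath flow: the CNIC driver queues 16-byte kernel work queue
 * entries (KWQEs) via bnx2x_cnic_sp_queue(); bnx2x_cnic_sp_post() then
 * drains them onto the hardware slow path queue (SPQ), charging each entry
 * against the matching budget (cq_spq_left, eq_spq_left or
 * cnic_spq_pending) so that neither the SPQ nor the EQ is overrun.
 */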
10106 /* count denotes the number of new completions we have seen */
10107 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
10108 {
10109         struct eth_spe *spe;
10110
10111 #ifdef BNX2X_STOP_ON_ERROR
10112         if (unlikely(bp->panic))
10113                 return;
10114 #endif
10115
10116         spin_lock_bh(&bp->spq_lock);
10117         BUG_ON(bp->cnic_spq_pending < count);
10118         bp->cnic_spq_pending -= count;
10119
10120
10121         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
10122                 u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
10123                                 & SPE_HDR_CONN_TYPE) >>
10124                                 SPE_HDR_CONN_TYPE_SHIFT;
10125
10126                 /* Set validation for the iSCSI L2 client before sending
10127                  * the SETUP ramrod
10128                  */
10129                 if (type == ETH_CONNECTION_TYPE) {
10130                         u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
10131                                              hdr.conn_and_cmd_data) >>
10132                                 SPE_HDR_CMD_ID_SHIFT) & 0xff;
10133
10134                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
10135                                 bnx2x_set_ctx_validation(&bp->context.
10136                                                 vcxt[BNX2X_ISCSI_ETH_CID].eth,
10137                                         HW_CID(bp, BNX2X_ISCSI_ETH_CID));
10138                 }
10139
10140                 /* There may be no more than 8 L2 and no more than 8 L5
10141                  * SPEs pending at any time.  We also check that the number
10142                  * of outstanding COMMON ramrods does not exceed what the
10143                  * EQ and SPQ can accommodate.
10144                  */
10145                 if (type == ETH_CONNECTION_TYPE) {
10146                         if (!atomic_read(&bp->cq_spq_left))
10147                                 break;
10148                         else
10149                                 atomic_dec(&bp->cq_spq_left);
10150                 } else if (type == NONE_CONNECTION_TYPE) {
10151                         if (!atomic_read(&bp->eq_spq_left))
10152                                 break;
10153                         else
10154                                 atomic_dec(&bp->eq_spq_left);
10155                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
10156                            (type == FCOE_CONNECTION_TYPE)) {
10157                         if (bp->cnic_spq_pending >=
10158                             bp->cnic_eth_dev.max_kwqe_pending)
10159                                 break;
10160                         else
10161                                 bp->cnic_spq_pending++;
10162                 } else {
10163                         BNX2X_ERR("Unknown SPE type: %d\n", type);
10164                         bnx2x_panic();
10165                         break;
10166                 }
10167
10168                 spe = bnx2x_sp_get_next(bp);
10169                 *spe = *bp->cnic_kwq_cons;
10170
10171                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
10172                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
10173
10174                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
10175                         bp->cnic_kwq_cons = bp->cnic_kwq;
10176                 else
10177                         bp->cnic_kwq_cons++;
10178         }
10179         bnx2x_sp_prod_update(bp);
10180         spin_unlock_bh(&bp->spq_lock);
10181 }
10182
10183 static int bnx2x_cnic_sp_queue(struct net_device *dev,
10184                                struct kwqe_16 *kwqes[], u32 count)
10185 {
10186         struct bnx2x *bp = netdev_priv(dev);
10187         int i;
10188
10189 #ifdef BNX2X_STOP_ON_ERROR
10190         if (unlikely(bp->panic))
10191                 return -EIO;
10192 #endif
10193
10194         spin_lock_bh(&bp->spq_lock);
10195
10196         for (i = 0; i < count; i++) {
10197                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
10198
10199                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
10200                         break;
10201
10202                 *bp->cnic_kwq_prod = *spe;
10203
10204                 bp->cnic_kwq_pending++;
10205
10206                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
10207                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
10208                    spe->data.update_data_addr.hi,
10209                    spe->data.update_data_addr.lo,
10210                    bp->cnic_kwq_pending);
10211
10212                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
10213                         bp->cnic_kwq_prod = bp->cnic_kwq;
10214                 else
10215                         bp->cnic_kwq_prod++;
10216         }
10217
10218         spin_unlock_bh(&bp->spq_lock);
10219
10220         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
10221                 bnx2x_cnic_sp_post(bp, 0);
10222
10223         return i;
10224 }
10225
10226 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10227 {
10228         struct cnic_ops *c_ops;
10229         int rc = 0;
10230
10231         mutex_lock(&bp->cnic_mutex);
10232         c_ops = rcu_dereference_protected(bp->cnic_ops,
10233                                           lockdep_is_held(&bp->cnic_mutex));
10234         if (c_ops)
10235                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10236         mutex_unlock(&bp->cnic_mutex);
10237
10238         return rc;
10239 }
10240
10241 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10242 {
10243         struct cnic_ops *c_ops;
10244         int rc = 0;
10245
10246         rcu_read_lock();
10247         c_ops = rcu_dereference(bp->cnic_ops);
10248         if (c_ops)
10249                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10250         rcu_read_unlock();
10251
10252         return rc;
10253 }
10254
10255 /*
10256  * for commands that have no data
10257  */
10258 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
10259 {
10260         struct cnic_ctl_info ctl = {0};
10261
10262         ctl.cmd = cmd;
10263
10264         return bnx2x_cnic_ctl_send(bp, &ctl);
10265 }
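/*
 * Usage sketch (illustrative, as in the load/unload paths):
 *
 *	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 *
 * builds a zeroed cnic_ctl_info with only .cmd set and forwards it to the
 * registered CNIC ops under cnic_mutex.
 */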
10266
10267 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10268 {
10269         struct cnic_ctl_info ctl;
10270
10271         /* first we tell CNIC and only then we count this as a completion */
10272         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10273         ctl.data.comp.cid = cid;
10274
10275         bnx2x_cnic_ctl_send_bh(bp, &ctl);
10276         bnx2x_cnic_sp_post(bp, 0);
10277 }
10278
10279 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10280 {
10281         struct bnx2x *bp = netdev_priv(dev);
10282         int rc = 0;
10283
10284         switch (ctl->cmd) {
10285         case DRV_CTL_CTXTBL_WR_CMD: {
10286                 u32 index = ctl->data.io.offset;
10287                 dma_addr_t addr = ctl->data.io.dma_addr;
10288
10289                 bnx2x_ilt_wr(bp, index, addr);
10290                 break;
10291         }
10292
10293         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
10294                 int count = ctl->data.credit.credit_count;
10295
10296                 bnx2x_cnic_sp_post(bp, count);
10297                 break;
10298         }
10299
10300         /* rtnl_lock is held.  */
10301         case DRV_CTL_START_L2_CMD: {
10302                 u32 cli = ctl->data.ring.client_id;
10303
10304                 /* Clear the FCoE FIP and All-ENode MAC addresses first */
10305                 bnx2x_del_fcoe_eth_macs(bp);
10306
10307                 /* Set iSCSI MAC address */
10308                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10309
10310                 mmiowb();
10311                 barrier();
10312
10313                 /* Start accepting on the iSCSI L2 ring. Accept all
10314                  * multicasts, because it's the only way for the UIO Client
10315                  * to accept multicasts (in non-promiscuous mode only one
10316                  * Client per function will receive multicast packets - the
10317                  * leading Client in our case).
10318                  */
10319                 bnx2x_rxq_set_mac_filters(bp, cli,
10320                         BNX2X_ACCEPT_UNICAST |
10321                         BNX2X_ACCEPT_BROADCAST |
10322                         BNX2X_ACCEPT_ALL_MULTICAST);
10323                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10324
10325                 break;
10326         }
10327
10328         /* rtnl_lock is held.  */
10329         case DRV_CTL_STOP_L2_CMD: {
10330                 u32 cli = ctl->data.ring.client_id;
10331
10332                 /* Stop accepting on iSCSI L2 ring */
10333                 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
10334                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
10335
10336                 mmiowb();
10337                 barrier();
10338
10339                 /* Unset iSCSI L2 MAC */
10340                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
10341                 break;
10342         }
10343         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
10344                 int count = ctl->data.credit.credit_count;
10345
10346                 smp_mb__before_atomic_inc();
10347                 atomic_add(count, &bp->cq_spq_left);
10348                 smp_mb__after_atomic_inc();
10349                 break;
10350         }
10351
10352         default:
10353                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10354                 rc = -EINVAL;
10355         }
10356
10357         return rc;
10358 }
10359
10360 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
10361 {
10362         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10363
10364         if (bp->flags & USING_MSIX_FLAG) {
10365                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10366                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10367                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10368         } else {
10369                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10370                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10371         }
10372         if (CHIP_IS_E2(bp))
10373                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10374         else
10375                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10376
10377         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
10378         cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
10379         cp->irq_arr[1].status_blk = bp->def_status_blk;
10380         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
10381         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
10382
10383         cp->num_irq = 2;
10384 }
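/*
 * Note: in MSI-X mode bp->msix_table[1] (the vector after the default
 * status block's) is handed to CNIC via irq_arr[0], while irq_arr[1]
 * always describes the default status block; hence num_irq = 2.
 */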
10385
10386 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
10387                                void *data)
10388 {
10389         struct bnx2x *bp = netdev_priv(dev);
10390         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10391
10392         if (ops == NULL)
10393                 return -EINVAL;
10394
10395         if (atomic_read(&bp->intr_sem) != 0)
10396                 return -EBUSY;
10397
10398         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
10399         if (!bp->cnic_kwq)
10400                 return -ENOMEM;
10401
10402         bp->cnic_kwq_cons = bp->cnic_kwq;
10403         bp->cnic_kwq_prod = bp->cnic_kwq;
10404         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
10405
10406         bp->cnic_spq_pending = 0;
10407         bp->cnic_kwq_pending = 0;
10408
10409         bp->cnic_data = data;
10410
10411         cp->num_irq = 0;
10412         cp->drv_state = CNIC_DRV_STATE_REGD;
10413         cp->iro_arr = bp->iro_arr;
10414
10415         bnx2x_setup_cnic_irq_info(bp);
10416
10417         rcu_assign_pointer(bp->cnic_ops, ops);
10418
10419         return 0;
10420 }
10421
10422 static int bnx2x_unregister_cnic(struct net_device *dev)
10423 {
10424         struct bnx2x *bp = netdev_priv(dev);
10425         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10426
10427         mutex_lock(&bp->cnic_mutex);
10428         cp->drv_state = 0;
10429         rcu_assign_pointer(bp->cnic_ops, NULL);
10430         mutex_unlock(&bp->cnic_mutex);
10431         synchronize_rcu();
10432         kfree(bp->cnic_kwq);
10433         bp->cnic_kwq = NULL;
10434
10435         return 0;
10436 }
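/*
 * The NULL assignment above publishes the unregistration to readers such
 * as bnx2x_cnic_ctl_send_bh(); synchronize_rcu() then waits for all
 * in-flight rcu_read_lock() sections to finish before cnic_kwq is freed -
 * the classic RCU publish/retire pattern.
 */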
10437
10438 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10439 {
10440         struct bnx2x *bp = netdev_priv(dev);
10441         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10442
10443         /* If both iSCSI and FCoE are disabled, return NULL in order to
10444          * indicate to CNIC that it should not try to work with this
10445          * device.
10446          */
10447         if (NO_ISCSI(bp) && NO_FCOE(bp))
10448                 return NULL;
10449
10450         cp->drv_owner = THIS_MODULE;
10451         cp->chip_id = CHIP_ID(bp);
10452         cp->pdev = bp->pdev;
10453         cp->io_base = bp->regview;
10454         cp->io_base2 = bp->doorbells;
10455         cp->max_kwqe_pending = 8;
10456         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
10457         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10458                              bnx2x_cid_ilt_lines(bp);
10459         cp->ctx_tbl_len = CNIC_ILT_LINES;
10460         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
10461         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10462         cp->drv_ctl = bnx2x_drv_ctl;
10463         cp->drv_register_cnic = bnx2x_register_cnic;
10464         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
10465         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10466         cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10467                 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10468         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10469
10470         if (NO_ISCSI_OOO(bp))
10471                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10472
10473         if (NO_ISCSI(bp))
10474                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10475
10476         if (NO_FCOE(bp))
10477                 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10478
10479         DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10480                          "starting cid %d\n",
10481            cp->ctx_blk_size,
10482            cp->ctx_tbl_offset,
10483            cp->ctx_tbl_len,
10484            cp->starting_cid);
10485         return cp;
10486 }
10487 EXPORT_SYMBOL(bnx2x_cnic_probe);
10488
10489 #endif /* BCM_CNIC */
10490