1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
53
54
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
59
60 #define DRV_MODULE_VERSION      "1.52.53-1"
61 #define DRV_MODULE_RELDATE      "2010/04/18"
62 #define BNX2X_BC_VER            0x040200
63
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
66 /* FW files */
67 #define FW_FILE_VERSION                                 \
68         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
69         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
70         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
71         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"
74
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT              (5*HZ)
77
78 static char version[] __devinitdata =
79         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
88
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92                              "(0 Disable; 1 Enable (default))");
93
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97                                 " (default is the number of CPUs)");
98
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102
103 static int int_mode;
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
106                                 "(1 INT#x; 2 MSI)");
107
108 static int dropless_fc;
109 module_param(dropless_fc, int, 0);
110 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111
112 static int poll;
113 module_param(poll, int, 0);
114 MODULE_PARM_DESC(poll, " Use polling (for debug)");
115
116 static int mrrs = -1;
117 module_param(mrrs, int, 0);
118 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119
120 static int debug;
121 module_param(debug, int, 0);
122 MODULE_PARM_DESC(debug, " Default debug msglevel");
123
124 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
125
126 static struct workqueue_struct *bnx2x_wq;
127
128 enum bnx2x_board_type {
129         BCM57710 = 0,
130         BCM57711 = 1,
131         BCM57711E = 2,
132 };
133
134 /* indexed by board_type, above */
135 static struct {
136         char *name;
137 } board_info[] __devinitdata = {
138         { "Broadcom NetXtreme II BCM57710 XGb" },
139         { "Broadcom NetXtreme II BCM57711 XGb" },
140         { "Broadcom NetXtreme II BCM57711E XGb" }
141 };
142
143
144 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
145         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
148         { 0 }
149 };
150
151 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152
153 /****************************************************************************
154 * General service functions
155 ****************************************************************************/
156
157 /* used only at init
158  * locking is done by mcp
159  */
160 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
161 {
162         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165                                PCICFG_VENDOR_ID_OFFSET);
166 }
167
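/* counterpart of bnx2x_reg_wr_ind(): indirect register read through the
 * PCI config-space GRC window (init time only, locking done by the MCP)
 */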
168 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
169 {
170         u32 val;
171
172         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175                                PCICFG_VENDOR_ID_OFFSET);
176
177         return val;
178 }
179
180 static const u32 dmae_reg_go_c[] = {
181         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
185 };
186
187 /* copy command into DMAE command memory and set DMAE command go */
188 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
189                             int idx)
190 {
191         u32 cmd_offset;
192         int i;
193
194         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
197
198                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
200         }
201         REG_WR(bp, dmae_reg_go_c[idx], 1);
202 }
203
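/* DMAE-copy len32 dwords from host memory at dma_addr to GRC address
 * dst_addr; falls back to indirect register writes while DMAE is not
 * ready, and polls the wb_comp word (with a timeout) for completion
 */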
204 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
205                       u32 len32)
206 {
207         struct dmae_command dmae;
208         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
209         int cnt = 200;
210
211         if (!bp->dmae_ready) {
212                 u32 *data = bnx2x_sp(bp, wb_data[0]);
213
214                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
215                    "  using indirect\n", dst_addr, len32);
216                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
217                 return;
218         }
219
220         memset(&dmae, 0, sizeof(struct dmae_command));
221
222         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
225 #ifdef __BIG_ENDIAN
226                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
227 #else
228                        DMAE_CMD_ENDIANITY_DW_SWAP |
229 #endif
230                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232         dmae.src_addr_lo = U64_LO(dma_addr);
233         dmae.src_addr_hi = U64_HI(dma_addr);
234         dmae.dst_addr_lo = dst_addr >> 2;
235         dmae.dst_addr_hi = 0;
236         dmae.len = len32;
237         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239         dmae.comp_val = DMAE_COMP_VAL;
240
241         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
242            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
243                     "dst_addr [%x:%08x (%08x)]\n"
244            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
245            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
248         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
249            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
251
252         mutex_lock(&bp->dmae_mutex);
253
254         *wb_comp = 0;
255
256         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
257
258         udelay(5);
259
260         while (*wb_comp != DMAE_COMP_VAL) {
261                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
262
263                 if (!cnt) {
264                         BNX2X_ERR("DMAE timeout!\n");
265                         break;
266                 }
267                 cnt--;
268                 /* adjust delay for emulation/FPGA */
269                 if (CHIP_REV_IS_SLOW(bp))
270                         msleep(100);
271                 else
272                         udelay(5);
273         }
274
275         mutex_unlock(&bp->dmae_mutex);
276 }
277
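/* DMAE-copy len32 dwords from GRC address src_addr into the slowpath
 * wb_data buffer; falls back to indirect reads while DMAE is not ready
 */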
278 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
279 {
280         struct dmae_command dmae;
281         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
282         int cnt = 200;
283
284         if (!bp->dmae_ready) {
285                 u32 *data = bnx2x_sp(bp, wb_data[0]);
286                 int i;
287
288                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
289                    "  using indirect\n", src_addr, len32);
290                 for (i = 0; i < len32; i++)
291                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
292                 return;
293         }
294
295         memset(&dmae, 0, sizeof(struct dmae_command));
296
297         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
300 #ifdef __BIG_ENDIAN
301                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
302 #else
303                        DMAE_CMD_ENDIANITY_DW_SWAP |
304 #endif
305                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307         dmae.src_addr_lo = src_addr >> 2;
308         dmae.src_addr_hi = 0;
309         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
311         dmae.len = len32;
312         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314         dmae.comp_val = DMAE_COMP_VAL;
315
316         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
317            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
318                     "dst_addr [%x:%08x (%08x)]\n"
319            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
320            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
323
324         mutex_lock(&bp->dmae_mutex);
325
326         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
327         *wb_comp = 0;
328
329         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
330
331         udelay(5);
332
333         while (*wb_comp != DMAE_COMP_VAL) {
334
335                 if (!cnt) {
336                         BNX2X_ERR("DMAE timeout!\n");
337                         break;
338                 }
339                 cnt--;
340                 /* adjust delay for emulation/FPGA */
341                 if (CHIP_REV_IS_SLOW(bp))
342                         msleep(100);
343                 else
344                         udelay(5);
345         }
346         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
347            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
349
350         mutex_unlock(&bp->dmae_mutex);
351 }
352
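/* write a buffer of arbitrary length by splitting it into chunks of at
 * most DMAE_LEN32_WR_MAX dwords (the per-command DMAE limit); note that
 * offset advances in bytes while len counts dwords
 */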
353 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354                                u32 addr, u32 len)
355 {
356         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
357         int offset = 0;
358
359         while (len > dmae_wr_max) {
360                 bnx2x_write_dmae(bp, phys_addr + offset,
361                                  addr + offset, dmae_wr_max);
362                 offset += dmae_wr_max * 4;
363                 len -= dmae_wr_max;
364         }
365
366         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
367 }
368
369 /* used only for slowpath so not inlined */
370 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
371 {
372         u32 wb_write[2];
373
374         wb_write[0] = val_hi;
375         wb_write[1] = val_lo;
376         REG_WR_DMAE(bp, reg, wb_write, 2);
377 }
378
379 #ifdef USE_WB_RD
380 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
381 {
382         u32 wb_data[2];
383
384         REG_RD_DMAE(bp, reg, wb_data, 2);
385
386         return HILO_U64(wb_data[0], wb_data[1]);
387 }
388 #endif
389
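/* walk the assert lists of the X/T/C/U storms and print every valid
 * entry; returns the number of asserts found
 */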
390 static int bnx2x_mc_assert(struct bnx2x *bp)
391 {
392         char last_idx;
393         int i, rc = 0;
394         u32 row0, row1, row2, row3;
395
396         /* XSTORM */
397         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
399         if (last_idx)
400                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
401
402         /* print the asserts */
403         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
404
405                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406                               XSTORM_ASSERT_LIST_OFFSET(i));
407                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
413
414                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416                                   " 0x%08x 0x%08x 0x%08x\n",
417                                   i, row3, row2, row1, row0);
418                         rc++;
419                 } else {
420                         break;
421                 }
422         }
423
424         /* TSTORM */
425         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
427         if (last_idx)
428                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
429
430         /* print the asserts */
431         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
432
433                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434                               TSTORM_ASSERT_LIST_OFFSET(i));
435                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
441
442                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444                                   " 0x%08x 0x%08x 0x%08x\n",
445                                   i, row3, row2, row1, row0);
446                         rc++;
447                 } else {
448                         break;
449                 }
450         }
451
452         /* CSTORM */
453         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
455         if (last_idx)
456                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
457
458         /* print the asserts */
459         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
460
461                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462                               CSTORM_ASSERT_LIST_OFFSET(i));
463                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
469
470                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472                                   " 0x%08x 0x%08x 0x%08x\n",
473                                   i, row3, row2, row1, row0);
474                         rc++;
475                 } else {
476                         break;
477                 }
478         }
479
480         /* USTORM */
481         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482                            USTORM_ASSERT_LIST_INDEX_OFFSET);
483         if (last_idx)
484                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
485
486         /* print the asserts */
487         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
488
489                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490                               USTORM_ASSERT_LIST_OFFSET(i));
491                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
493                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
495                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
497
498                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500                                   " 0x%08x 0x%08x 0x%08x\n",
501                                   i, row3, row2, row1, row0);
502                         rc++;
503                 } else {
504                         break;
505                 }
506         }
507
508         return rc;
509 }
510
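/* print the MCP trace buffer that lives just below shmem: the buffer is
 * cyclic, so it is dumped in two segments around the current mark
 */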
511 static void bnx2x_fw_dump(struct bnx2x *bp)
512 {
513         u32 addr;
514         u32 mark, offset;
515         __be32 data[9];
516         int word;
517
518         if (BP_NOMCP(bp)) {
519                 BNX2X_ERR("NO MCP - can not dump\n");
520                 return;
521         }
522
523         addr = bp->common.shmem_base - 0x0800 + 4;
524         mark = REG_RD(bp, addr);
525         mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
526         pr_err("begin fw dump (mark 0x%x)\n", mark);
527
528         pr_err("");
529         for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
530                 for (word = 0; word < 8; word++)
531                         data[word] = htonl(REG_RD(bp, offset + 4*word));
532                 data[8] = 0x0;
533                 pr_cont("%s", (char *)data);
534         }
535         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
536                 for (word = 0; word < 8; word++)
537                         data[word] = htonl(REG_RD(bp, offset + 4*word));
538                 data[8] = 0x0;
539                 pr_cont("%s", (char *)data);
540         }
541         pr_err("end of fw dump\n");
542 }
543
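/* dump driver state on panic: status-block indices, Rx/Tx ring contents
 * around the current consumers, the firmware trace and storm asserts
 */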
544 static void bnx2x_panic_dump(struct bnx2x *bp)
545 {
546         int i;
547         u16 j, start, end;
548
549         bp->stats_state = STATS_STATE_DISABLED;
550         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
551
552         BNX2X_ERR("begin crash dump -----------------\n");
553
554         /* Indices */
555         /* Common */
556         BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
557                   "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
558                   "  spq_prod_idx(0x%x)\n",
559                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
561
562         /* Rx */
563         for_each_queue(bp, i) {
564                 struct bnx2x_fastpath *fp = &bp->fp[i];
565
566                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
567                           "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
568                           "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
569                           i, fp->rx_bd_prod, fp->rx_bd_cons,
570                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
572                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
573                           "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
574                           fp->rx_sge_prod, fp->last_max_sge,
575                           le16_to_cpu(fp->fp_u_idx),
576                           fp->status_blk->u_status_block.status_block_index);
577         }
578
579         /* Tx */
580         for_each_queue(bp, i) {
581                 struct bnx2x_fastpath *fp = &bp->fp[i];
582
583                 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
584                           "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
585                           "  *tx_cons_sb(0x%x)\n",
586                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
588                 BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
589                           "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
590                           fp->status_blk->c_status_block.status_block_index,
591                           fp->tx_db.data.prod);
592         }
593
594         /* Rings */
595         /* Rx */
596         for_each_queue(bp, i) {
597                 struct bnx2x_fastpath *fp = &bp->fp[i];
598
599                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
601                 for (j = start; j != end; j = RX_BD(j + 1)) {
602                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
604
605                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
606                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
607                 }
608
609                 start = RX_SGE(fp->rx_sge_prod);
610                 end = RX_SGE(fp->last_max_sge);
611                 for (j = start; j != end; j = RX_SGE(j + 1)) {
612                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
614
615                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
616                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
617                 }
618
619                 start = RCQ_BD(fp->rx_comp_cons - 10);
620                 end = RCQ_BD(fp->rx_comp_cons + 503);
621                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
622                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
623
624                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
626                 }
627         }
628
629         /* Tx */
630         for_each_queue(bp, i) {
631                 struct bnx2x_fastpath *fp = &bp->fp[i];
632
633                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635                 for (j = start; j != end; j = TX_BD(j + 1)) {
636                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
637
638                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639                                   i, j, sw_bd->skb, sw_bd->first_bd);
640                 }
641
642                 start = TX_BD(fp->tx_bd_cons - 10);
643                 end = TX_BD(fp->tx_bd_cons + 254);
644                 for (j = start; j != end; j = TX_BD(j + 1)) {
645                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
646
647                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
649                 }
650         }
651
652         bnx2x_fw_dump(bp);
653         bnx2x_mc_assert(bp);
654         BNX2X_ERR("end crash dump -----------------\n");
655 }
656
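/* enable interrupts in the HC according to the active mode
 * (MSI-X/MSI/INTx) and, on E1H, program the leading/trailing edge
 * attention masks
 */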
657 static void bnx2x_int_enable(struct bnx2x *bp)
658 {
659         int port = BP_PORT(bp);
660         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661         u32 val = REG_RD(bp, addr);
662         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
663         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
664
665         if (msix) {
666                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667                          HC_CONFIG_0_REG_INT_LINE_EN_0);
668                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670         } else if (msi) {
671                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
675         } else {
676                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
677                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
678                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
679                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
680
681                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
682                    val, port, addr);
683
684                 REG_WR(bp, addr, val);
685
686                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
687         }
688
689         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
690            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
691
692         REG_WR(bp, addr, val);
693         /*
694          * Ensure that HC_CONFIG is written before leading/trailing edge config
695          */
696         mmiowb();
697         barrier();
698
699         if (CHIP_IS_E1H(bp)) {
700                 /* init leading/trailing edge */
701                 if (IS_E1HMF(bp)) {
702                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
703                         if (bp->port.pmf)
704                                 /* enable nig and gpio3 attention */
705                                 val |= 0x1100;
706                 } else
707                         val = 0xffff;
708
709                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
711         }
712
713         /* Make sure that interrupts are indeed enabled from here on */
714         mmiowb();
715 }
716
717 static void bnx2x_int_disable(struct bnx2x *bp)
718 {
719         int port = BP_PORT(bp);
720         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721         u32 val = REG_RD(bp, addr);
722
723         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
726                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
727
728         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
729            val, port, addr);
730
731         /* flush all outstanding writes */
732         mmiowb();
733
734         REG_WR(bp, addr, val);
735         if (REG_RD(bp, addr) != val)
736                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
737 }
738
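/* stop interrupt handling: raise intr_sem so ISRs bail out, optionally
 * mask the HC, wait for all in-flight ISRs and flush the slowpath task
 */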
739 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
740 {
741         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
742         int i, offset;
743
744         /* disable interrupt handling */
745         atomic_inc(&bp->intr_sem);
746         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
747
748         if (disable_hw)
749                 /* prevent the HW from sending interrupts */
750                 bnx2x_int_disable(bp);
751
752         /* make sure all ISRs are done */
753         if (msix) {
754                 synchronize_irq(bp->msix_table[0].vector);
755                 offset = 1;
756 #ifdef BCM_CNIC
757                 offset++;
758 #endif
759                 for_each_queue(bp, i)
760                         synchronize_irq(bp->msix_table[i + offset].vector);
761         } else
762                 synchronize_irq(bp->pdev->irq);
763
764         /* make sure sp_task is not running */
765         cancel_delayed_work(&bp->sp_task);
766         flush_workqueue(bnx2x_wq);
767 }
768
769 /* fast path */
770
771 /*
772  * General service functions
773  */
774
775 /* Return true if the lock was acquired successfully */
776 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
777 {
778         u32 lock_status;
779         u32 resource_bit = (1 << resource);
780         int func = BP_FUNC(bp);
781         u32 hw_lock_control_reg;
782
783         DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
784
785         /* Validating that the resource is within range */
786         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
787                 DP(NETIF_MSG_HW,
788                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
790                 return false;
791         }
792
793         if (func <= 5)
794                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
795         else
796                 hw_lock_control_reg =
797                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
798
799         /* Try to acquire the lock */
800         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801         lock_status = REG_RD(bp, hw_lock_control_reg);
802         if (lock_status & resource_bit)
803                 return true;
804
805         DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
806         return false;
807 }
808
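/* acknowledge a status block towards the IGU: latch 'index' for the
 * given storm and set the update/interrupt-mode bits of the command
 */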
809 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810                                 u8 storm, u16 index, u8 op, u8 update)
811 {
812         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813                        COMMAND_REG_INT_ACK);
814         struct igu_ack_register igu_ack;
815
816         igu_ack.status_block_index = index;
817         igu_ack.sb_id_and_flags =
818                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
822
823         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824            (*(u32 *)&igu_ack), hc_addr);
825         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
826
827         /* Make sure that ACK is written */
828         mmiowb();
829         barrier();
830 }
831
832 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
833 {
834         struct host_status_block *fpsb = fp->status_blk;
835
836         barrier(); /* status block is written to by the chip */
837         fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838         fp->fp_u_idx = fpsb->u_status_block.status_block_index;
839 }
840
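/* read the HC SIMD mask register: the read returns the pending
 * interrupt bits and at the same time acks them
 */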
841 static u16 bnx2x_ack_int(struct bnx2x *bp)
842 {
843         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844                        COMMAND_REG_SIMD_MASK);
845         u32 result = REG_RD(bp, hc_addr);
846
847         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
848            result, hc_addr);
849
850         return result;
851 }
852
853
854 /*
855  * fast path service functions
856  */
857
858 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
859 {
860         /* Tell compiler that consumer and producer can change */
861         barrier();
862         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
863 }
864
865 /* free skb in the packet ring at pos idx
866  * return idx of last bd freed
867  */
868 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
869                              u16 idx)
870 {
871         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872         struct eth_tx_start_bd *tx_start_bd;
873         struct eth_tx_bd *tx_data_bd;
874         struct sk_buff *skb = tx_buf->skb;
875         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
876         int nbd;
877
878         /* prefetch skb end pointer to speed up dev_kfree_skb() */
879         prefetch(&skb->end);
880
881         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
882            idx, tx_buf, skb);
883
884         /* unmap first bd */
885         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
889
890         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891 #ifdef BNX2X_STOP_ON_ERROR
892         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893                 BNX2X_ERR("BAD nbd!\n");
894                 bnx2x_panic();
895         }
896 #endif
897         new_cons = nbd + tx_buf->first_bd;
898
899         /* Get the next bd */
900         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
901
902         /* Skip a parse bd... */
903         --nbd;
904         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
905
906         /* ...and the TSO split header bd since they have no mapping */
907         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
908                 --nbd;
909                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
910         }
911
912         /* now free frags */
913         while (nbd > 0) {
914
915                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
919                 if (--nbd)
920                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
921         }
922
923         /* release skb */
924         WARN_ON(!skb);
925         dev_kfree_skb(skb);
926         tx_buf->first_bd = 0;
927         tx_buf->skb = NULL;
928
929         return new_cons;
930 }
931
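/* number of free Tx BDs; the NUM_TX_RINGS "next-page" BDs are counted
 * as used so that they are never handed out
 */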
932 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
933 {
934         s16 used;
935         u16 prod;
936         u16 cons;
937
938         prod = fp->tx_bd_prod;
939         cons = fp->tx_bd_cons;
940
941         /* NUM_TX_RINGS = number of "next-page" entries
942            It will be used as a threshold */
943         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
944
945 #ifdef BNX2X_STOP_ON_ERROR
946         WARN_ON(used < 0);
947         WARN_ON(used > fp->bp->tx_ring_size);
948         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
949 #endif
950
951         return (s16)(fp->bp->tx_ring_size) - used;
952 }
953
954 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
955 {
956         u16 hw_cons;
957
958         /* Tell compiler that status block fields can change */
959         barrier();
960         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961         return hw_cons != fp->tx_pkt_cons;
962 }
963
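/* reclaim Tx packets completed up to the status-block consumer and,
 * if the queue was stopped and enough BDs were freed, wake it up
 */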
964 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
965 {
966         struct bnx2x *bp = fp->bp;
967         struct netdev_queue *txq;
968         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
969
970 #ifdef BNX2X_STOP_ON_ERROR
971         if (unlikely(bp->panic))
972                 return -1;
973 #endif
974
975         txq = netdev_get_tx_queue(bp->dev, fp->index);
976         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977         sw_cons = fp->tx_pkt_cons;
978
979         while (sw_cons != hw_cons) {
980                 u16 pkt_cons;
981
982                 pkt_cons = TX_BD(sw_cons);
983
984                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
985
986                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
987                    hw_cons, sw_cons, pkt_cons);
988
989 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
990                         rmb();
991                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
992                 }
993 */
994                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
995                 sw_cons++;
996         }
997
998         fp->tx_pkt_cons = sw_cons;
999         fp->tx_bd_cons = bd_cons;
1000
1001         /* Need to make the tx_bd_cons update visible to start_xmit()
1002          * before checking for netif_tx_queue_stopped().  Without the
1003          * memory barrier, there is a small possibility that
1004          * start_xmit() will miss it and cause the queue to be stopped
1005          * forever.
1006          */
1007         smp_mb();
1008
1009         /* TBD need a thresh? */
1010         if (unlikely(netif_tx_queue_stopped(txq))) {
1011                 /* Taking tx_lock() is needed to prevent reenabling the queue
1012          * while it's empty. This could happen if rx_action() gets
1013                  * suspended in bnx2x_tx_int() after the condition before
1014                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1015                  *
1016                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017                  * sends some packets consuming the whole queue again->
1018                  * stops the queue
1019                  */
1020
1021                 __netif_tx_lock(txq, smp_processor_id());
1022
1023                 if ((netif_tx_queue_stopped(txq)) &&
1024                     (bp->state == BNX2X_STATE_OPEN) &&
1025                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026                         netif_tx_wake_queue(txq);
1027
1028                 __netif_tx_unlock(txq);
1029         }
1030         return 0;
1031 }
1032
1033 #ifdef BCM_CNIC
1034 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1035 #endif
1036
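/* handle a ramrod completion CQE: advance the per-fastpath state
 * machine for non-leading queues, or the global bp->state otherwise
 */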
1037 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038                            union eth_rx_cqe *rr_cqe)
1039 {
1040         struct bnx2x *bp = fp->bp;
1041         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1043
1044         DP(BNX2X_MSG_SP,
1045            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1046            fp->index, cid, command, bp->state,
1047            rr_cqe->ramrod_cqe.ramrod_type);
1048
1049         bp->spq_left++;
1050
1051         if (fp->index) {
1052                 switch (command | fp->state) {
1053                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054                                                 BNX2X_FP_STATE_OPENING):
1055                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1056                            cid);
1057                         fp->state = BNX2X_FP_STATE_OPEN;
1058                         break;
1059
1060                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1062                            cid);
1063                         fp->state = BNX2X_FP_STATE_HALTED;
1064                         break;
1065
1066                 default:
1067                         BNX2X_ERR("unexpected MC reply (%d)  "
1068                                   "fp[%d] state is %x\n",
1069                                   command, fp->index, fp->state);
1070                         break;
1071                 }
1072                 mb(); /* force bnx2x_wait_ramrod() to see the change */
1073                 return;
1074         }
1075
1076         switch (command | bp->state) {
1077         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079                 bp->state = BNX2X_STATE_OPEN;
1080                 break;
1081
1082         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085                 fp->state = BNX2X_FP_STATE_HALTED;
1086                 break;
1087
1088         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1089                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1090                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1091                 break;
1092
1093 #ifdef BCM_CNIC
1094         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096                 bnx2x_cnic_cfc_comp(bp, cid);
1097                 break;
1098 #endif
1099
1100         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1101         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1102                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1103                 bp->set_mac_pending--;
1104                 smp_wmb();
1105                 break;
1106
1107         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1108                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1109                 bp->set_mac_pending--;
1110                 smp_wmb();
1111                 break;
1112
1113         default:
1114                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1115                           command, bp->state);
1116                 break;
1117         }
1118         mb(); /* force bnx2x_wait_ramrod() to see the change */
1119 }
1120
1121 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122                                      struct bnx2x_fastpath *fp, u16 index)
1123 {
1124         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125         struct page *page = sw_buf->page;
1126         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1127
1128         /* Skip "next page" elements */
1129         if (!page)
1130                 return;
1131
1132         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133                        SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1134         __free_pages(page, PAGES_PER_SGE_SHIFT);
1135
1136         sw_buf->page = NULL;
1137         sge->addr_hi = 0;
1138         sge->addr_lo = 0;
1139 }
1140
1141 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142                                            struct bnx2x_fastpath *fp, int last)
1143 {
1144         int i;
1145
1146         for (i = 0; i < last; i++)
1147                 bnx2x_free_rx_sge(bp, fp, i);
1148 }
1149
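/* allocate and map a compound page of PAGES_PER_SGE pages and publish
 * its DMA address in the SGE ring at 'index'
 */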
1150 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151                                      struct bnx2x_fastpath *fp, u16 index)
1152 {
1153         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1156         dma_addr_t mapping;
1157
1158         if (unlikely(page == NULL))
1159                 return -ENOMEM;
1160
1161         mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1165                 return -ENOMEM;
1166         }
1167
1168         sw_buf->page = page;
1169         dma_unmap_addr_set(sw_buf, mapping, mapping);
1170
1171         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1173
1174         return 0;
1175 }
1176
1177 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178                                      struct bnx2x_fastpath *fp, u16 index)
1179 {
1180         struct sk_buff *skb;
1181         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1183         dma_addr_t mapping;
1184
1185         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186         if (unlikely(skb == NULL))
1187                 return -ENOMEM;
1188
1189         mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1190                                  DMA_FROM_DEVICE);
1191         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1192                 dev_kfree_skb(skb);
1193                 return -ENOMEM;
1194         }
1195
1196         rx_buf->skb = skb;
1197         dma_unmap_addr_set(rx_buf, mapping, mapping);
1198
1199         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1201
1202         return 0;
1203 }
1204
1205 /* note that we are not allocating a new skb,
1206  * we are just moving one from cons to prod
1207  * we are not creating a new mapping,
1208  * so there is no need to check for dma_mapping_error().
1209  */
1210 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211                                struct sk_buff *skb, u16 cons, u16 prod)
1212 {
1213         struct bnx2x *bp = fp->bp;
1214         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1218
1219         dma_sync_single_for_device(&bp->pdev->dev,
1220                                    dma_unmap_addr(cons_rx_buf, mapping),
1221                                    RX_COPY_THRESH, DMA_FROM_DEVICE);
1222
1223         prod_rx_buf->skb = cons_rx_buf->skb;
1224         dma_unmap_addr_set(prod_rx_buf, mapping,
1225                            dma_unmap_addr(cons_rx_buf, mapping));
1226         *prod_bd = *cons_bd;
1227 }
1228
1229 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1230                                              u16 idx)
1231 {
1232         u16 last_max = fp->last_max_sge;
1233
1234         if (SUB_S16(idx, last_max) > 0)
1235                 fp->last_max_sge = idx;
1236 }
1237
1238 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1239 {
1240         int i, j;
1241
1242         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243                 int idx = RX_SGE_CNT * i - 1;
1244
1245                 for (j = 0; j < 2; j++) {
1246                         SGE_MASK_CLEAR_BIT(fp, idx);
1247                         idx--;
1248                 }
1249         }
1250 }
1251
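/* mark the SGEs consumed by this aggregation in the mask and advance
 * rx_sge_prod past every fully reused 64-entry mask element
 */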
1252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253                                   struct eth_fast_path_rx_cqe *fp_cqe)
1254 {
1255         struct bnx2x *bp = fp->bp;
1256         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1258                       SGE_PAGE_SHIFT;
1259         u16 last_max, last_elem, first_elem;
1260         u16 delta = 0;
1261         u16 i;
1262
1263         if (!sge_len)
1264                 return;
1265
1266         /* First mark all used pages */
1267         for (i = 0; i < sge_len; i++)
1268                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1269
1270         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1272
1273         /* Here we assume that the last SGE index is the biggest */
1274         prefetch((void *)(fp->sge_mask));
1275         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1276
1277         last_max = RX_SGE(fp->last_max_sge);
1278         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1280
1281         /* If ring is not full */
1282         if (last_elem + 1 != first_elem)
1283                 last_elem++;
1284
1285         /* Now update the prod */
1286         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287                 if (likely(fp->sge_mask[i]))
1288                         break;
1289
1290                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291                 delta += RX_SGE_MASK_ELEM_SZ;
1292         }
1293
1294         if (delta > 0) {
1295                 fp->rx_sge_prod += delta;
1296                 /* clear page-end entries */
1297                 bnx2x_clear_sge_mask_next_elems(fp);
1298         }
1299
1300         DP(NETIF_MSG_RX_STATUS,
1301            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1302            fp->last_max_sge, fp->rx_sge_prod);
1303 }
1304
1305 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1306 {
1307         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308         memset(fp->sge_mask, 0xff,
1309                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1310
1311         /* Clear the mask bits of the two last indices in each page:
1312            these are the indices that correspond to the "next" element,
1313            hence will never be indicated and should be removed from
1314            the calculations. */
1315         bnx2x_clear_sge_mask_next_elems(fp);
1316 }
1317
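/* start a TPA aggregation in bin 'queue': park the skb being
 * aggregated in the pool and put the pool's spare skb on the ring
 * (at prod) in its place
 */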
1318 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319                             struct sk_buff *skb, u16 cons, u16 prod)
1320 {
1321         struct bnx2x *bp = fp->bp;
1322         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1325         dma_addr_t mapping;
1326
1327         /* move empty skb from pool to prod and map it */
1328         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330                                  bp->rx_buf_size, DMA_FROM_DEVICE);
1331         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1332
1333         /* move partial skb from cons to pool (don't unmap yet) */
1334         fp->tpa_pool[queue] = *cons_rx_buf;
1335
1336         /* mark bin state as start - print error if current state != stop */
1337         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1339
1340         fp->tpa_state[queue] = BNX2X_TPA_START;
1341
1342         /* point prod_bd to new skb */
1343         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1345
1346 #ifdef BNX2X_STOP_ON_ERROR
1347         fp->tpa_queue_used |= (1 << queue);
1348 #ifdef _ASM_GENERIC_INT_L64_H
1349         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1350 #else
1351         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1352 #endif
1353            fp->tpa_queue_used);
1354 #endif
1355 }
1356
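/* attach the SGL pages of an aggregated packet to the skb as frags,
 * allocating a substitute ring page for each one consumed
 */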
1357 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358                                struct sk_buff *skb,
1359                                struct eth_fast_path_rx_cqe *fp_cqe,
1360                                u16 cqe_idx)
1361 {
1362         struct sw_rx_page *rx_pg, old_rx_pg;
1363         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364         u32 i, frag_len, frag_size, pages;
1365         int err;
1366         int j;
1367
1368         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1370
1371         /* This is needed in order to enable forwarding support */
1372         if (frag_size)
1373                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374                                                max(frag_size, (u32)len_on_bd));
1375
1376 #ifdef BNX2X_STOP_ON_ERROR
1377         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1379                           pages, cqe_idx);
1380                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1381                           fp_cqe->pkt_len, len_on_bd);
1382                 bnx2x_panic();
1383                 return -EINVAL;
1384         }
1385 #endif
1386
1387         /* Run through the SGL and compose the fragmented skb */
1388         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1390
1391                 /* FW gives the indices of the SGE as if the ring is an array
1392                    (meaning that "next" element will consume 2 indices) */
1393                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394                 rx_pg = &fp->rx_page_ring[sge_idx];
1395                 old_rx_pg = *rx_pg;
1396
1397                 /* If we fail to allocate a substitute page, we simply stop
1398                    where we are and drop the whole packet */
1399                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400                 if (unlikely(err)) {
1401                         fp->eth_q_stats.rx_skb_alloc_failed++;
1402                         return err;
1403                 }
1404
1405                 /* Unmap the page as we are going to pass it to the stack */
1406                 dma_unmap_page(&bp->pdev->dev,
1407                                dma_unmap_addr(&old_rx_pg, mapping),
1408                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1409
1410                 /* Add one frag and update the appropriate fields in the skb */
1411                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1412
1413                 skb->data_len += frag_len;
1414                 skb->truesize += frag_len;
1415                 skb->len += frag_len;
1416
1417                 frag_size -= frag_len;
1418         }
1419
1420         return 0;
1421 }
1422
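/* close a TPA aggregation: fix the IP checksum, attach the frags and
 * hand the skb to GRO; on allocation failure the packet is dropped
 */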
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425                            u16 cqe_idx)
1426 {
1427         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428         struct sk_buff *skb = rx_buf->skb;
1429         /* alloc new skb */
1430         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432         /* Unmap skb in the pool anyway, as we are going to change
1433            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434            fails. */
1435         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436                          bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438         if (likely(new_skb)) {
1439                 /* fix ip xsum and give it to the stack */
1440                 /* (no need to map the new skb) */
1441 #ifdef BCM_VLAN
1442                 int is_vlan_cqe =
1443                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444                          PARSING_FLAGS_VLAN);
1445                 int is_not_hwaccel_vlan_cqe =
1446                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447 #endif
1448
1449                 prefetch(skb);
1450                 prefetch(((char *)(skb)) + 128);
1451
1452 #ifdef BNX2X_STOP_ON_ERROR
1453                 if (pad + len > bp->rx_buf_size) {
1454                         BNX2X_ERR("skb_put is about to fail...  "
1455                                   "pad %d  len %d  rx_buf_size %d\n",
1456                                   pad, len, bp->rx_buf_size);
1457                         bnx2x_panic();
1458                         return;
1459                 }
1460 #endif
1461
1462                 skb_reserve(skb, pad);
1463                 skb_put(skb, len);
1464
1465                 skb->protocol = eth_type_trans(skb, bp->dev);
1466                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468                 {
1469                         struct iphdr *iph;
1470
1471                         iph = (struct iphdr *)skb->data;
1472 #ifdef BCM_VLAN
1473                         /* If there is no Rx VLAN offloading -
1474                            take the VLAN tag into account */
1475                         if (unlikely(is_not_hwaccel_vlan_cqe))
1476                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477 #endif
1478                         iph->check = 0;
1479                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480                 }
1481
1482                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483                                          &cqe->fast_path_cqe, cqe_idx)) {
1484 #ifdef BCM_VLAN
1485                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486                             (!is_not_hwaccel_vlan_cqe))
1487                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488                                                  le16_to_cpu(cqe->fast_path_cqe.
1489                                                              vlan_tag), skb);
1490                         else
1491 #endif
1492                                 napi_gro_receive(&fp->napi, skb);
1493                 } else {
1494                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495                            " - dropping packet!\n");
1496                         dev_kfree_skb(skb);
1497                 }
1498
1499
1500                 /* put new skb in bin */
1501                 fp->tpa_pool[queue].skb = new_skb;
1502
1503         } else {
1504                 /* else drop the packet and keep the buffer in the bin */
1505                 DP(NETIF_MSG_RX_STATUS,
1506                    "Failed to allocate new skb - dropping packet!\n");
1507                 fp->eth_q_stats.rx_skb_alloc_failed++;
1508         }
1509
1510         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511 }
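
/* Note on bnx2x_tpa_stop() above: every path leaves the pool entry in
 * BNX2X_TPA_STOP state.  On success the aggregated skb goes to GRO and
 * new_skb takes its slot; if the SGL fill fails, the aggregation is
 * freed but new_skb is still binned; and if new_skb allocation fails,
 * the old skb simply stays in the bin to be reused.
 */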
1512
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514                                         struct bnx2x_fastpath *fp,
1515                                         u16 bd_prod, u16 rx_comp_prod,
1516                                         u16 rx_sge_prod)
1517 {
1518         struct ustorm_eth_rx_producers rx_prods = {0};
1519         int i;
1520
1521         /* Update producers */
1522         rx_prods.bd_prod = bd_prod;
1523         rx_prods.cqe_prod = rx_comp_prod;
1524         rx_prods.sge_prod = rx_sge_prod;
1525
1526         /*
1527          * Make sure that the BD and SGE data is updated before updating the
1528          * producers since FW might read the BD/SGE right after the producer
1529          * is updated.
1530          * This is only applicable for weak-ordered memory model archs such
1531          * as IA-64. The following barrier is also mandatory since the FW
1532          * assumes that BDs must have buffers.
1533          */
1534         wmb();
1535
1536         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537                 REG_WR(bp, BAR_USTRORM_INTMEM +
1538                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539                        ((u32 *)&rx_prods)[i]);
1540
1541         mmiowb(); /* keep prod updates ordered */
1542
1543         DP(NETIF_MSG_RX_STATUS,
1544            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1545            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546 }
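
/* A sketch of what the loop in bnx2x_update_rx_prod() emits, assuming
 * struct ustorm_eth_rx_producers is 8 bytes (three u16 producers plus
 * padding): two 32-bit writes into USTORM internal memory, e.g. for
 * port 0 and cl_id 0:
 *
 *   REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_RX_PRODS_OFFSET(0, 0) + 0, ...);
 *   REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_RX_PRODS_OFFSET(0, 0) + 4, ...);
 */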
1547
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549 {
1550         struct bnx2x *bp = fp->bp;
1551         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553         int rx_pkt = 0;
1554
1555 #ifdef BNX2X_STOP_ON_ERROR
1556         if (unlikely(bp->panic))
1557                 return 0;
1558 #endif
1559
1560         /* The CQ "next element" is the same size as a regular element,
1561            which is why it is OK here */
1562         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564                 hw_comp_cons++;
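        /* (The test above presumes the last slot of each RCQ page holds
         *  the "next page" element, so a consumer index landing on it is
         *  bumped past it rather than treated as a completion.) */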
1565
1566         bd_cons = fp->rx_bd_cons;
1567         bd_prod = fp->rx_bd_prod;
1568         bd_prod_fw = bd_prod;
1569         sw_comp_cons = fp->rx_comp_cons;
1570         sw_comp_prod = fp->rx_comp_prod;
1571
1572         /* Memory barrier necessary as speculative reads of the rx
1573          * buffer can be ahead of the index in the status block
1574          */
1575         rmb();
1576
1577         DP(NETIF_MSG_RX_STATUS,
1578            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1579            fp->index, hw_comp_cons, sw_comp_cons);
1580
1581         while (sw_comp_cons != hw_comp_cons) {
1582                 struct sw_rx_bd *rx_buf = NULL;
1583                 struct sk_buff *skb;
1584                 union eth_rx_cqe *cqe;
1585                 u8 cqe_fp_flags, cqe_fp_status_flags;
1586                 u16 len, pad;
1587
1588                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589                 bd_prod = RX_BD(bd_prod);
1590                 bd_cons = RX_BD(bd_cons);
1591
1592                 /* Prefetch the page containing the BD descriptor
1593                    at the producer's index. It will be needed when a new
1594                    skb is allocated */
1595                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596                                              (&fp->rx_desc_ring[bd_prod])) -
1597                                   PAGE_SIZE + 1));
1598
1599                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601                 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1602
1603                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1604                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1605                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1606                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1607                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1608                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1609
1610                 /* is this a slowpath msg? */
1611                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1612                         bnx2x_sp_event(fp, cqe);
1613                         goto next_cqe;
1614
1615                 /* this is an rx packet */
1616                 } else {
1617                         rx_buf = &fp->rx_buf_ring[bd_cons];
1618                         skb = rx_buf->skb;
1619                         prefetch(skb);
1620                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621                         pad = cqe->fast_path_cqe.placement_offset;
1622
1623                         /* If CQE is marked both TPA_START and TPA_END
1624                            it is a non-TPA CQE */
1625                         if ((!fp->disable_tpa) &&
1626                             (TPA_TYPE(cqe_fp_flags) !=
1627                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1628                                 u16 queue = cqe->fast_path_cqe.queue_index;
1629
1630                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631                                         DP(NETIF_MSG_RX_STATUS,
1632                                            "calling tpa_start on queue %d\n",
1633                                            queue);
1634
1635                                         bnx2x_tpa_start(fp, queue, skb,
1636                                                         bd_cons, bd_prod);
1637                                         goto next_rx;
1638                                 }
1639
1640                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641                                         DP(NETIF_MSG_RX_STATUS,
1642                                            "calling tpa_stop on queue %d\n",
1643                                            queue);
1644
1645                                         if (!BNX2X_RX_SUM_FIX(cqe))
1646                                                 BNX2X_ERR("STOP on non-TCP "
1647                                                           "data\n");
1648
1649                                         /* This is the size of the linear
1650                                            data on this skb */
1651                                         len = le16_to_cpu(cqe->fast_path_cqe.
1652                                                                 len_on_bd);
1653                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1654                                                     len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1656                                         if (bp->panic)
1657                                                 return 0;
1658 #endif
1659
1660                                         bnx2x_update_sge_prod(fp,
1661                                                         &cqe->fast_path_cqe);
1662                                         goto next_cqe;
1663                                 }
1664                         }
1665
1666                         dma_sync_single_for_device(&bp->pdev->dev,
1667                                         dma_unmap_addr(rx_buf, mapping),
1668                                                    pad + RX_COPY_THRESH,
1669                                                    DMA_FROM_DEVICE);
1670                         prefetch(((char *)(skb)) + 128);
1671
1672                         /* is this an error packet? */
1673                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1674                                 DP(NETIF_MSG_RX_ERR,
1675                                    "ERROR  flags %x  rx packet %u\n",
1676                                    cqe_fp_flags, sw_comp_cons);
1677                                 fp->eth_q_stats.rx_err_discard_pkt++;
1678                                 goto reuse_rx;
1679                         }
1680
1681                         /* Since we don't have a jumbo ring,
1682                          * copy small packets if the MTU exceeds 1500
1683                          */
1684                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1685                             (len <= RX_COPY_THRESH)) {
1686                                 struct sk_buff *new_skb;
1687
1688                                 new_skb = netdev_alloc_skb(bp->dev,
1689                                                            len + pad);
1690                                 if (new_skb == NULL) {
1691                                         DP(NETIF_MSG_RX_ERR,
1692                                            "ERROR  packet dropped "
1693                                            "because of alloc failure\n");
1694                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1695                                         goto reuse_rx;
1696                                 }
1697
1698                                 /* aligned copy */
1699                                 skb_copy_from_linear_data_offset(skb, pad,
1700                                                     new_skb->data + pad, len);
1701                                 skb_reserve(new_skb, pad);
1702                                 skb_put(new_skb, len);
1703
1704                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1705
1706                                 skb = new_skb;
1707
1708                         } else
1709                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1710                                 dma_unmap_single(&bp->pdev->dev,
1711                                         dma_unmap_addr(rx_buf, mapping),
1712                                                  bp->rx_buf_size,
1713                                                  DMA_FROM_DEVICE);
1714                                 skb_reserve(skb, pad);
1715                                 skb_put(skb, len);
1716
1717                         } else {
1718                                 DP(NETIF_MSG_RX_ERR,
1719                                    "ERROR  packet dropped because "
1720                                    "of alloc failure\n");
1721                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1722 reuse_rx:
1723                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1724                                 goto next_rx;
1725                         }
1726
1727                         skb->protocol = eth_type_trans(skb, bp->dev);
1728
1729                         if ((bp->dev->features & NETIF_F_RXHASH) &&
1730                             (cqe_fp_status_flags &
1731                              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732                                 skb->rxhash = le32_to_cpu(
1733                                     cqe->fast_path_cqe.rss_hash_result);
1734
1735                         skb->ip_summed = CHECKSUM_NONE;
1736                         if (bp->rx_csum) {
1737                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1738                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1739                                 else
1740                                         fp->eth_q_stats.hw_csum_err++;
1741                         }
1742                 }
1743
1744                 skb_record_rx_queue(skb, fp->index);
1745
1746 #ifdef BCM_VLAN
1747                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1748                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1749                      PARSING_FLAGS_VLAN))
1750                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1751                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1752                 else
1753 #endif
1754                         napi_gro_receive(&fp->napi, skb);
1755
1756
1757 next_rx:
1758                 rx_buf->skb = NULL;
1759
1760                 bd_cons = NEXT_RX_IDX(bd_cons);
1761                 bd_prod = NEXT_RX_IDX(bd_prod);
1762                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1763                 rx_pkt++;
1764 next_cqe:
1765                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1766                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1767
1768                 if (rx_pkt == budget)
1769                         break;
1770         } /* while */
1771
1772         fp->rx_bd_cons = bd_cons;
1773         fp->rx_bd_prod = bd_prod_fw;
1774         fp->rx_comp_cons = sw_comp_cons;
1775         fp->rx_comp_prod = sw_comp_prod;
1776
1777         /* Update producers */
1778         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1779                              fp->rx_sge_prod);
1780
1781         fp->rx_pkt += rx_pkt;
1782         fp->rx_calls++;
1783
1784         return rx_pkt;
1785 }
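
/* bnx2x_rx_int() above is the NAPI poll core: it consumes at most
 * 'budget' completions and returns the number processed.  A return
 * value equal to 'budget' tells the caller the ring may still hold
 * work and polling should continue; a smaller value lets NAPI complete
 * and re-enable interrupts.
 */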
1786
1787 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1788 {
1789         struct bnx2x_fastpath *fp = fp_cookie;
1790         struct bnx2x *bp = fp->bp;
1791
1792         /* Return here if interrupt is disabled */
1793         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1794                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1795                 return IRQ_HANDLED;
1796         }
1797
1798         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1799            fp->index, fp->sb_id);
1800         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1801
1802 #ifdef BNX2X_STOP_ON_ERROR
1803         if (unlikely(bp->panic))
1804                 return IRQ_HANDLED;
1805 #endif
1806
1807         /* Handle Rx and Tx according to MSI-X vector */
1808         prefetch(fp->rx_cons_sb);
1809         prefetch(fp->tx_cons_sb);
1810         prefetch(&fp->status_blk->u_status_block.status_block_index);
1811         prefetch(&fp->status_blk->c_status_block.status_block_index);
1812         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1813
1814         return IRQ_HANDLED;
1815 }
1816
1817 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1818 {
1819         struct bnx2x *bp = netdev_priv(dev_instance);
1820         u16 status = bnx2x_ack_int(bp);
1821         u16 mask;
1822         int i;
1823
1824         /* Return here if interrupt is shared and it's not for us */
1825         if (unlikely(status == 0)) {
1826                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1827                 return IRQ_NONE;
1828         }
1829         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1830
1831         /* Return here if interrupt is disabled */
1832         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1833                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1834                 return IRQ_HANDLED;
1835         }
1836
1837 #ifdef BNX2X_STOP_ON_ERROR
1838         if (unlikely(bp->panic))
1839                 return IRQ_HANDLED;
1840 #endif
1841
1842         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1843                 struct bnx2x_fastpath *fp = &bp->fp[i];
1844
1845                 mask = 0x2 << fp->sb_id;
1846                 if (status & mask) {
1847                         /* Handle Rx and Tx according to SB id */
1848                         prefetch(fp->rx_cons_sb);
1849                         prefetch(&fp->status_blk->u_status_block.
1850                                                 status_block_index);
1851                         prefetch(fp->tx_cons_sb);
1852                         prefetch(&fp->status_blk->c_status_block.
1853                                                 status_block_index);
1854                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1855                         status &= ~mask;
1856                 }
1857         }
1858
1859 #ifdef BCM_CNIC
1860         mask = 0x2 << CNIC_SB_ID(bp);
1861         if (status & (mask | 0x1)) {
1862                 struct cnic_ops *c_ops = NULL;
1863
1864                 rcu_read_lock();
1865                 c_ops = rcu_dereference(bp->cnic_ops);
1866                 if (c_ops)
1867                         c_ops->cnic_handler(bp->cnic_data, NULL);
1868                 rcu_read_unlock();
1869
1870                 status &= ~mask;
1871         }
1872 #endif
1873
1874         if (unlikely(status & 0x1)) {
1875                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1876
1877                 status &= ~0x1;
1878                 if (!status)
1879                         return IRQ_HANDLED;
1880         }
1881
1882         if (unlikely(status))
1883                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1884                    status);
1885
1886         return IRQ_HANDLED;
1887 }
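
/* Decoding of the INTA status word in bnx2x_interrupt() (a sketch based
 * on how it is consumed above): bit 0 is the slowpath/default status
 * block and each fastpath SB owns bit (1 + sb_id), hence the
 * "mask = 0x2 << fp->sb_id" test.  E.g. status == 0x5 means SB 1 and
 * the slowpath both need service.
 */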
1888
1889 /* end of fast path */
1890
1891 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1892
1893 /* Link */
1894
1895 /*
1896  * General service functions
1897  */
1898
1899 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1900 {
1901         u32 lock_status;
1902         u32 resource_bit = (1 << resource);
1903         int func = BP_FUNC(bp);
1904         u32 hw_lock_control_reg;
1905         int cnt;
1906
1907         /* Validating that the resource is within range */
1908         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1909                 DP(NETIF_MSG_HW,
1910                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1911                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1912                 return -EINVAL;
1913         }
1914
1915         if (func <= 5) {
1916                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1917         } else {
1918                 hw_lock_control_reg =
1919                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1920         }
1921
1922         /* Validating that the resource is not already taken */
1923         lock_status = REG_RD(bp, hw_lock_control_reg);
1924         if (lock_status & resource_bit) {
1925                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1926                    lock_status, resource_bit);
1927                 return -EEXIST;
1928         }
1929
1930         /* Try for 5 seconds, polling every 5 ms */
1931         for (cnt = 0; cnt < 1000; cnt++) {
1932                 /* Try to acquire the lock */
1933                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1934                 lock_status = REG_RD(bp, hw_lock_control_reg);
1935                 if (lock_status & resource_bit)
1936                         return 0;
1937
1938                 msleep(5);
1939         }
1940         DP(NETIF_MSG_HW, "Timeout\n");
1941         return -EAGAIN;
1942 }
1943
1944 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1945 {
1946         u32 lock_status;
1947         u32 resource_bit = (1 << resource);
1948         int func = BP_FUNC(bp);
1949         u32 hw_lock_control_reg;
1950
1951         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1952
1953         /* Validating that the resource is within range */
1954         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1955                 DP(NETIF_MSG_HW,
1956                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1957                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1958                 return -EINVAL;
1959         }
1960
1961         if (func <= 5) {
1962                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1963         } else {
1964                 hw_lock_control_reg =
1965                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1966         }
1967
1968         /* Validating that the resource is currently taken */
1969         lock_status = REG_RD(bp, hw_lock_control_reg);
1970         if (!(lock_status & resource_bit)) {
1971                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1972                    lock_status, resource_bit);
1973                 return -EFAULT;
1974         }
1975
1976         REG_WR(bp, hw_lock_control_reg, resource_bit);
1977         return 0;
1978 }
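
/* Typical pairing of the two functions above, as used later in this
 * file for the GPIO resource:
 *
 *   bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *   ... read-modify-write MISC_REG_GPIO ...
 *   bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */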
1979
1980 /* HW Lock for shared dual port PHYs */
1981 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1982 {
1983         mutex_lock(&bp->port.phy_mutex);
1984
1985         if (bp->port.need_hw_lock)
1986                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1987 }
1988
1989 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1990 {
1991         if (bp->port.need_hw_lock)
1992                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1993
1994         mutex_unlock(&bp->port.phy_mutex);
1995 }
1996
1997 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1998 {
1999         /* The GPIO should be swapped if swap register is set and active */
2000         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2001                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2002         int gpio_shift = gpio_num +
2003                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2004         u32 gpio_mask = (1 << gpio_shift);
2005         u32 gpio_reg;
2006         int value;
2007
2008         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2009                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2010                 return -EINVAL;
2011         }
2012
2013         /* read GPIO value */
2014         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2015
2016         /* get the requested pin value */
2017         if ((gpio_reg & gpio_mask) == gpio_mask)
2018                 value = 1;
2019         else
2020                 value = 0;
2021
2022         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2023
2024         return value;
2025 }
2026
2027 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2028 {
2029         /* The GPIO should be swapped if swap register is set and active */
2030         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2031                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2032         int gpio_shift = gpio_num +
2033                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2034         u32 gpio_mask = (1 << gpio_shift);
2035         u32 gpio_reg;
2036
2037         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2038                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2039                 return -EINVAL;
2040         }
2041
2042         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2043         /* read GPIO and mask except the float bits */
2044         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2045
2046         switch (mode) {
2047         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2048                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2049                    gpio_num, gpio_shift);
2050                 /* clear FLOAT and set CLR */
2051                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2052                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2053                 break;
2054
2055         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2056                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2057                    gpio_num, gpio_shift);
2058                 /* clear FLOAT and set SET */
2059                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2061                 break;
2062
2063         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2064                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2065                    gpio_num, gpio_shift);
2066                 /* set FLOAT */
2067                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2076
2077         return 0;
2078 }
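
/* Usage sketch for bnx2x_set_gpio(): driving a pin low and then
 * floating it again on port 0 (the pin number here is for illustration
 * only):
 *
 *   bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
 *   bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_INPUT_HI_Z, 0);
 */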
2079
2080 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2081 {
2082         /* The GPIO should be swapped if swap register is set and active */
2083         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2084                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2085         int gpio_shift = gpio_num +
2086                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2087         u32 gpio_mask = (1 << gpio_shift);
2088         u32 gpio_reg;
2089
2090         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2091                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2092                 return -EINVAL;
2093         }
2094
2095         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2096         /* read GPIO int */
2097         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2098
2099         switch (mode) {
2100         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2101                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2102                                    "output low\n", gpio_num, gpio_shift);
2103                 /* clear SET and set CLR */
2104                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2105                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2106                 break;
2107
2108         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2109                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2110                                    "output high\n", gpio_num, gpio_shift);
2111                 /* clear CLR and set SET */
2112                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2113                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2114                 break;
2115
2116         default:
2117                 break;
2118         }
2119
2120         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2121         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2122
2123         return 0;
2124 }
2125
2126 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2127 {
2128         u32 spio_mask = (1 << spio_num);
2129         u32 spio_reg;
2130
2131         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2132             (spio_num > MISC_REGISTERS_SPIO_7)) {
2133                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2134                 return -EINVAL;
2135         }
2136
2137         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2138         /* read SPIO and mask except the float bits */
2139         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2140
2141         switch (mode) {
2142         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2143                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2144                 /* clear FLOAT and set CLR */
2145                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2146                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2147                 break;
2148
2149         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2150                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2151                 /* clear FLOAT and set SET */
2152                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2153                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2154                 break;
2155
2156         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2157                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2158                 /* set FLOAT */
2159                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160                 break;
2161
2162         default:
2163                 break;
2164         }
2165
2166         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2167         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2168
2169         return 0;
2170 }
2171
2172 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2173 {
2174         switch (bp->link_vars.ieee_fc &
2175                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2176         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2177                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2178                                           ADVERTISED_Pause);
2179                 break;
2180
2181         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2182                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2183                                          ADVERTISED_Pause);
2184                 break;
2185
2186         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2187                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2188                 break;
2189
2190         default:
2191                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192                                           ADVERTISED_Pause);
2193                 break;
2194         }
2195 }
2196
2197 static void bnx2x_link_report(struct bnx2x *bp)
2198 {
2199         if (bp->flags & MF_FUNC_DIS) {
2200                 netif_carrier_off(bp->dev);
2201                 netdev_err(bp->dev, "NIC Link is Down\n");
2202                 return;
2203         }
2204
2205         if (bp->link_vars.link_up) {
2206                 u16 line_speed;
2207
2208                 if (bp->state == BNX2X_STATE_OPEN)
2209                         netif_carrier_on(bp->dev);
2210                 netdev_info(bp->dev, "NIC Link is Up, ");
2211
2212                 line_speed = bp->link_vars.line_speed;
2213                 if (IS_E1HMF(bp)) {
2214                         u16 vn_max_rate;
2215
2216                         vn_max_rate =
2217                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2218                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219                         if (vn_max_rate < line_speed)
2220                                 line_speed = vn_max_rate;
2221                 }
2222                 pr_cont("%d Mbps ", line_speed);
2223
2224                 if (bp->link_vars.duplex == DUPLEX_FULL)
2225                         pr_cont("full duplex");
2226                 else
2227                         pr_cont("half duplex");
2228
2229                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2230                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2231                                 pr_cont(", receive ");
2232                                 if (bp->link_vars.flow_ctrl &
2233                                     BNX2X_FLOW_CTRL_TX)
2234                                         pr_cont("& transmit ");
2235                         } else {
2236                                 pr_cont(", transmit ");
2237                         }
2238                         pr_cont("flow control ON");
2239                 }
2240                 pr_cont("\n");
2241
2242         } else { /* link_down */
2243                 netif_carrier_off(bp->dev);
2244                 netdev_err(bp->dev, "NIC Link is Down\n");
2245         }
2246 }
2247
2248 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2249 {
2250         if (!BP_NOMCP(bp)) {
2251                 u8 rc;
2252
2253                 /* Initialize link parameters structure variables */
2254                 /* It is recommended to turn off RX FC for jumbo frames
2255                    for better performance */
2256                 if (bp->dev->mtu > 5000)
2257                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2258                 else
2259                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2260
2261                 bnx2x_acquire_phy_lock(bp);
2262
2263                 if (load_mode == LOAD_DIAG)
2264                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2265
2266                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2267
2268                 bnx2x_release_phy_lock(bp);
2269
2270                 bnx2x_calc_fc_adv(bp);
2271
2272                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2273                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2274                         bnx2x_link_report(bp);
2275                 }
2276
2277                 return rc;
2278         }
2279         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2280         return -EINVAL;
2281 }
2282
2283 static void bnx2x_link_set(struct bnx2x *bp)
2284 {
2285         if (!BP_NOMCP(bp)) {
2286                 bnx2x_acquire_phy_lock(bp);
2287                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2288                 bnx2x_release_phy_lock(bp);
2289
2290                 bnx2x_calc_fc_adv(bp);
2291         } else
2292                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2293 }
2294
2295 static void bnx2x__link_reset(struct bnx2x *bp)
2296 {
2297         if (!BP_NOMCP(bp)) {
2298                 bnx2x_acquire_phy_lock(bp);
2299                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2300                 bnx2x_release_phy_lock(bp);
2301         } else
2302                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2303 }
2304
2305 static u8 bnx2x_link_test(struct bnx2x *bp)
2306 {
2307         u8 rc = 0;
2308
2309         if (!BP_NOMCP(bp)) {
2310                 bnx2x_acquire_phy_lock(bp);
2311                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2312                 bnx2x_release_phy_lock(bp);
2313         } else
2314                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2315
2316         return rc;
2317 }
2318
2319 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2320 {
2321         u32 r_param = bp->link_vars.line_speed / 8;
2322         u32 fair_periodic_timeout_usec;
2323         u32 t_fair;
2324
2325         memset(&(bp->cmng.rs_vars), 0,
2326                sizeof(struct rate_shaping_vars_per_port));
2327         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2328
2329         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2330         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2331
2332         /* This is the threshold below which no timer arming will occur.
2333            The 1.25 coefficient makes the threshold a little bigger than
2334            the real time, to compensate for timer inaccuracy */
2335         bp->cmng.rs_vars.rs_threshold =
2336                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2337
2338         /* resolution of fairness timer */
2339         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2340         /* For 10G it is 1000 usec; for 1G it is 10000 usec */
2341         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2342
2343         /* this is the threshold below which we won't arm the timer anymore */
2344         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2345
2346         /* We multiply by 1e3/8 to get bytes/msec.
2347            We don't want the credits to exceed a credit of
2348            t_fair*FAIR_MEM (the algorithm's resolution) */
2349         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2350         /* since each tick is 4 usec */
2351         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2352 }
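
/* Worked numbers for bnx2x_init_port_minmax() (illustrative): at
 * 10 Gbps, line_speed == 10000 so r_param == 1250 bytes/usec; per the
 * comment above t_fair is then 1000 usec, which implies T_FAIR_COEF is
 * 10^7 in these units, and rs_threshold comes out as
 * RS_PERIODIC_TIMEOUT_USEC * 1250 * 5/4 bytes.
 */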
2353
2354 /* Calculates the sum of vn_min_rates.
2355    It's needed for further normalizing of the min_rates.
2356    Returns:
2357      sum of vn_min_rates.
2358        or
2359      0 - if all the min_rates are 0.
2360      In the latter case the fairness algorithm should be deactivated.
2361      If not all min_rates are zero then those that are zeroes will be set to 1.
2362  */
2363 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2364 {
2365         int all_zero = 1;
2366         int port = BP_PORT(bp);
2367         int vn;
2368
2369         bp->vn_weight_sum = 0;
2370         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2371                 int func = 2*vn + port;
2372                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2373                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2374                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2375
2376                 /* Skip hidden vns */
2377                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2378                         continue;
2379
2380                 /* If min rate is zero - set it to 1 */
2381                 if (!vn_min_rate)
2382                         vn_min_rate = DEF_MIN_RATE;
2383                 else
2384                         all_zero = 0;
2385
2386                 bp->vn_weight_sum += vn_min_rate;
2387         }
2388
2389         /* ... only if all min rates are zeros - disable fairness */
2390         if (all_zero) {
2391                 bp->cmng.flags.cmng_enables &=
2392                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2393                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2394                    "  fairness will be disabled\n");
2395         } else
2396                 bp->cmng.flags.cmng_enables |=
2397                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2398 }
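
/* Example for bnx2x_calc_vn_weight_sum() (illustrative): with raw
 * min-BW fields of 0, 30 and 0 for three visible VNs, the zeroes are
 * bumped to DEF_MIN_RATE and vn_weight_sum becomes
 * 2*DEF_MIN_RATE + 3000 (the raw field is in units of 100 Mbps).
 * Fairness is disabled only when every visible VN is configured to 0.
 */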
2399
2400 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2401 {
2402         struct rate_shaping_vars_per_vn m_rs_vn;
2403         struct fairness_vars_per_vn m_fair_vn;
2404         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2405         u16 vn_min_rate, vn_max_rate;
2406         int i;
2407
2408         /* If function is hidden - set min and max to zeroes */
2409         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2410                 vn_min_rate = 0;
2411                 vn_max_rate = 0;
2412
2413         } else {
2414                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2415                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2416                 /* If min rate is zero - set it to 1 */
2417                 if (!vn_min_rate)
2418                         vn_min_rate = DEF_MIN_RATE;
2419                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2420                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2421         }
2422         DP(NETIF_MSG_IFUP,
2423            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2424            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2425
2426         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2427         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2428
2429         /* global vn counter - maximal Mbps for this vn */
2430         m_rs_vn.vn_counter.rate = vn_max_rate;
2431
2432         /* quota - number of bytes transmitted in this period */
2433         m_rs_vn.vn_counter.quota =
2434                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2435
2436         if (bp->vn_weight_sum) {
2437                 /* credit for each period of the fairness algorithm:
2438                    number of bytes in T_FAIR (the VNs share the port rate).
2439                    vn_weight_sum should not be larger than 10000, thus
2440                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2441                    than zero */
2442                 m_fair_vn.vn_credit_delta =
2443                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2444                                                    (8 * bp->vn_weight_sum))),
2445                               (bp->cmng.fair_vars.fair_threshold * 2));
2446                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2447                    m_fair_vn.vn_credit_delta);
2448         }
2449
2450         /* Store it to internal memory */
2451         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2452                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2454                        ((u32 *)(&m_rs_vn))[i]);
2455
2456         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2457                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2458                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2459                        ((u32 *)(&m_fair_vn))[i]);
2460 }
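
/* Quota arithmetic in bnx2x_init_vn_minmax() (illustrative): since
 * 1 Mbps == 1 bit/usec, a VN with vn_max_rate == 2500 Mbps gets
 * 2500 * RS_PERIODIC_TIMEOUT_USEC / 8 bytes of quota, i.e. exactly the
 * bytes it may transmit in one rate-shaping period.
 */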
2461
2462
2463 /* This function is called upon link interrupt */
2464 static void bnx2x_link_attn(struct bnx2x *bp)
2465 {
2466         u32 prev_link_status = bp->link_vars.link_status;
2467         /* Make sure that we are synced with the current statistics */
2468         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2469
2470         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2471
2472         if (bp->link_vars.link_up) {
2473
2474                 /* dropless flow control */
2475                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2476                         int port = BP_PORT(bp);
2477                         u32 pause_enabled = 0;
2478
2479                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2480                                 pause_enabled = 1;
2481
2482                         REG_WR(bp, BAR_USTRORM_INTMEM +
2483                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2484                                pause_enabled);
2485                 }
2486
2487                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2488                         struct host_port_stats *pstats;
2489
2490                         pstats = bnx2x_sp(bp, port_stats);
2491                         /* reset old bmac stats */
2492                         memset(&(pstats->mac_stx[0]), 0,
2493                                sizeof(struct mac_stx));
2494                 }
2495                 if (bp->state == BNX2X_STATE_OPEN)
2496                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2497         }
2498
2499         /* indicate link status only if link status actually changed */
2500         if (prev_link_status != bp->link_vars.link_status)
2501                 bnx2x_link_report(bp);
2502
2503         if (IS_E1HMF(bp)) {
2504                 int port = BP_PORT(bp);
2505                 int func;
2506                 int vn;
2507
2508                 /* Set the attention towards other drivers on the same port */
2509                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2510                         if (vn == BP_E1HVN(bp))
2511                                 continue;
2512
2513                         func = ((vn << 1) | port);
2514                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2515                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2516                 }
2517
2518                 if (bp->link_vars.link_up) {
2519                         int i;
2520
2521                         /* Init rate shaping and fairness contexts */
2522                         bnx2x_init_port_minmax(bp);
2523
2524                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2525                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2526
2527                         /* Store it to internal memory */
2528                         for (i = 0;
2529                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2530                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2531                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2532                                        ((u32 *)(&bp->cmng))[i]);
2533                 }
2534         }
2535 }
2536
2537 static void bnx2x__link_status_update(struct bnx2x *bp)
2538 {
2539         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2540                 return;
2541
2542         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2543
2544         if (bp->link_vars.link_up)
2545                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2546         else
2547                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2548
2549         bnx2x_calc_vn_weight_sum(bp);
2550
2551         /* indicate link status */
2552         bnx2x_link_report(bp);
2553 }
2554
2555 static void bnx2x_pmf_update(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558         u32 val;
2559
2560         bp->port.pmf = 1;
2561         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2562
2563         /* enable nig attention */
2564         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2565         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2566         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2567
2568         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2569 }
2570
2571 /* end of Link */
2572
2573 /* slow path */
2574
2575 /*
2576  * General service functions
2577  */
2578
2579 /* send the MCP a request, block until there is a reply */
2580 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2581 {
2582         int func = BP_FUNC(bp);
2583         u32 seq = ++bp->fw_seq;
2584         u32 rc = 0;
2585         u32 cnt = 1;
2586         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2587
2588         mutex_lock(&bp->fw_mb_mutex);
2589         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2590         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2591
2592         do {
2593                 /* let the FW do its magic ... */
2594                 msleep(delay);
2595
2596                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2597
2598                 /* Give the FW up to 5 seconds (500*10ms) */
2599         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2600
2601         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2602            cnt*delay, rc, seq);
2603
2604         /* is this a reply to our command? */
2605         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2606                 rc &= FW_MSG_CODE_MASK;
2607         else {
2608                 /* FW BUG! */
2609                 BNX2X_ERR("FW failed to respond!\n");
2610                 bnx2x_fw_dump(bp);
2611                 rc = 0;
2612         }
2613         mutex_unlock(&bp->fw_mb_mutex);
2614
2615         return rc;
2616 }
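
/* The poll in bnx2x_fw_command() runs for at most cnt * delay: 500
 * iterations at 10 ms (100 ms on slow emulation chips, per
 * CHIP_REV_IS_SLOW) before the FW is declared unresponsive.  Callers
 * such as bnx2x_dcc_event() below use it for simple acknowledgements:
 *
 *   bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 */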
2617
2618 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2619 static void bnx2x_set_rx_mode(struct net_device *dev);
2620
2621 static void bnx2x_e1h_disable(struct bnx2x *bp)
2622 {
2623         int port = BP_PORT(bp);
2624
2625         netif_tx_disable(bp->dev);
2626
2627         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2628
2629         netif_carrier_off(bp->dev);
2630 }
2631
2632 static void bnx2x_e1h_enable(struct bnx2x *bp)
2633 {
2634         int port = BP_PORT(bp);
2635
2636         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2637
2638         /* Tx queues should only be re-enabled */
2639         netif_tx_wake_all_queues(bp->dev);
2640
2641         /*
2642          * Do not call netif_carrier_on here, since it will be called
2643          * by the link state check if the link is up
2644          */
2645 }
2646
2647 static void bnx2x_update_min_max(struct bnx2x *bp)
2648 {
2649         int port = BP_PORT(bp);
2650         int vn, i;
2651
2652         /* Init rate shaping and fairness contexts */
2653         bnx2x_init_port_minmax(bp);
2654
2655         bnx2x_calc_vn_weight_sum(bp);
2656
2657         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2658                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2659
2660         if (bp->port.pmf) {
2661                 int func;
2662
2663                 /* Set the attention towards other drivers on the same port */
2664                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2665                         if (vn == BP_E1HVN(bp))
2666                                 continue;
2667
2668                         func = ((vn << 1) | port);
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2670                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2671                 }
2672
2673                 /* Store it to internal memory */
2674                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2675                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2676                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2677                                ((u32 *)(&bp->cmng))[i]);
2678         }
2679 }
2680
2681 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2682 {
2683         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2684
2685         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2686
2687                 /*
2688                  * This is the only place besides the function initialization
2689                  * where the bp->flags can change so it is done without any
2690                  * locks
2691                  */
2692                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2693                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2694                         bp->flags |= MF_FUNC_DIS;
2695
2696                         bnx2x_e1h_disable(bp);
2697                 } else {
2698                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2699                         bp->flags &= ~MF_FUNC_DIS;
2700
2701                         bnx2x_e1h_enable(bp);
2702                 }
2703                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2704         }
2705         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2706
2707                 bnx2x_update_min_max(bp);
2708                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2709         }
2710
2711         /* Report results to MCP */
2712         if (dcc_event)
2713                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2714         else
2715                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2716 }
2717
2718 /* must be called under the spq lock */
2719 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2720 {
2721         struct eth_spe *next_spe = bp->spq_prod_bd;
2722
2723         if (bp->spq_prod_bd == bp->spq_last_bd) {
2724                 bp->spq_prod_bd = bp->spq;
2725                 bp->spq_prod_idx = 0;
2726                 DP(NETIF_MSG_TIMER, "end of spq\n");
2727         } else {
2728                 bp->spq_prod_bd++;
2729                 bp->spq_prod_idx++;
2730         }
2731         return next_spe;
2732 }
2733
2734 /* must be called under the spq lock */
2735 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2736 {
2737         int func = BP_FUNC(bp);
2738
2739         /* Make sure that BD data is updated before writing the producer */
2740         wmb();
2741
2742         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2743                bp->spq_prod_idx);
2744         mmiowb();
2745 }
2746
2747 /* the slow path queue is odd since completions arrive on the fastpath ring */
2748 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2749                          u32 data_hi, u32 data_lo, int common)
2750 {
2751         struct eth_spe *spe;
2752
2753 #ifdef BNX2X_STOP_ON_ERROR
2754         if (unlikely(bp->panic))
2755                 return -EIO;
2756 #endif
2757
2758         spin_lock_bh(&bp->spq_lock);
2759
2760         if (!bp->spq_left) {
2761                 BNX2X_ERR("BUG! SPQ ring full!\n");
2762                 spin_unlock_bh(&bp->spq_lock);
2763                 bnx2x_panic();
2764                 return -EBUSY;
2765         }
2766
2767         spe = bnx2x_sp_get_next(bp);
2768
2769         /* The CID needs the port number to be encoded in it */
2770         spe->hdr.conn_and_cmd_data =
2771                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2772                                     HW_CID(bp, cid));
2773         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2774         if (common)
2775                 spe->hdr.type |=
2776                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2777
2778         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2779         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2780
2781         bp->spq_left--;
2782
2783         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2784            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2785            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786            (u32)(U64_LO(bp->spq_mapping) +
2787            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2789
2790         bnx2x_sp_prod_update(bp);
2791         spin_unlock_bh(&bp->spq_lock);
2792         return 0;
2793 }
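
/* Note on bnx2x_sp_post() above: 'common' marks ramrods that apply to
 * the function as a whole rather than to a single connection; it is
 * encoded by setting the COMMON_RAMROD bit in the SPE header type.
 */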
2794
2795 /* acquire split MCP access lock register */
2796 static int bnx2x_acquire_alr(struct bnx2x *bp)
2797 {
2798         u32 j, val;
2799         int rc = 0;
2800
2801         might_sleep();
2802         for (j = 0; j < 1000; j++) {
2803                 val = (1UL << 31);
2804                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2805                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2806                 if (val & (1L << 31))
2807                         break;
2808
2809                 msleep(5);
2810         }
2811         if (!(val & (1L << 31))) {
2812                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2813                 rc = -EBUSY;
2814         }
2815
2816         return rc;
2817 }
2818
2819 /* release split MCP access lock register */
2820 static void bnx2x_release_alr(struct bnx2x *bp)
2821 {
2822         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2823 }
2824
2825 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2826 {
2827         struct host_def_status_block *def_sb = bp->def_status_blk;
2828         u16 rc = 0;
2829
2830         barrier(); /* status block is written to by the chip */
2831         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2832                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2833                 rc |= 1;
2834         }
2835         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2836                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2837                 rc |= 2;
2838         }
2839         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2840                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2841                 rc |= 4;
2842         }
2843         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2844                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2845                 rc |= 8;
2846         }
2847         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2848                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2849                 rc |= 16;
2850         }
2851         return rc;
2852 }
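
/*
 * The value returned above is a bitmask of which def_sb indices moved:
 * bit 0 - attention bits, bit 1 - CStorm, bit 2 - UStorm,
 * bit 3 - XStorm, bit 4 - TStorm.  bnx2x_sp_task() below consumes it:
 *
 *	status = bnx2x_update_dsb_idx(bp);
 *	if (status & 0x1)
 *		bnx2x_attn_int(bp);
 */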
2853
2854 /*
2855  * slow path service functions
2856  */
2857
2858 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2859 {
2860         int port = BP_PORT(bp);
2861         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2862                        COMMAND_REG_ATTN_BITS_SET);
2863         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2864                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2865         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2866                                        NIG_REG_MASK_INTERRUPT_PORT0;
2867         u32 aeu_mask;
2868         u32 nig_mask = 0;
2869
2870         if (bp->attn_state & asserted)
2871                 BNX2X_ERR("IGU ERROR\n");
2872
2873         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874         aeu_mask = REG_RD(bp, aeu_addr);
2875
2876         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2877            aeu_mask, asserted);
2878         aeu_mask &= ~(asserted & 0x3ff);
2879         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2880
2881         REG_WR(bp, aeu_addr, aeu_mask);
2882         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2883
2884         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2885         bp->attn_state |= asserted;
2886         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2887
2888         if (asserted & ATTN_HARD_WIRED_MASK) {
2889                 if (asserted & ATTN_NIG_FOR_FUNC) {
2890
2891                         bnx2x_acquire_phy_lock(bp);
2892
2893                         /* save nig interrupt mask */
2894                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2895                         REG_WR(bp, nig_int_mask_addr, 0);
2896
2897                         bnx2x_link_attn(bp);
2898
2899                         /* handle unicore attn? */
2900                 }
2901                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2902                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2903
2904                 if (asserted & GPIO_2_FUNC)
2905                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2906
2907                 if (asserted & GPIO_3_FUNC)
2908                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2909
2910                 if (asserted & GPIO_4_FUNC)
2911                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2912
2913                 if (port == 0) {
2914                         if (asserted & ATTN_GENERAL_ATTN_1) {
2915                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2916                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2917                         }
2918                         if (asserted & ATTN_GENERAL_ATTN_2) {
2919                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2920                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2921                         }
2922                         if (asserted & ATTN_GENERAL_ATTN_3) {
2923                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2924                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2925                         }
2926                 } else {
2927                         if (asserted & ATTN_GENERAL_ATTN_4) {
2928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2930                         }
2931                         if (asserted & ATTN_GENERAL_ATTN_5) {
2932                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2933                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2934                         }
2935                         if (asserted & ATTN_GENERAL_ATTN_6) {
2936                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2937                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2938                         }
2939                 }
2940
2941         } /* if hardwired */
2942
2943         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2944            asserted, hc_addr);
2945         REG_WR(bp, hc_addr, asserted);
2946
2947         /* now set back the mask */
2948         if (asserted & ATTN_NIG_FOR_FUNC) {
2949                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2950                 bnx2x_release_phy_lock(bp);
2951         }
2952 }
2953
2954 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2955 {
2956         int port = BP_PORT(bp);
2957
2958         /* mark the failure */
2959         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2960         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2961         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2962                  bp->link_params.ext_phy_config);
2963
2964         /* log the failure */
2965         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2966                " the driver to shut down the card to prevent permanent"
2967                " damage. Please contact OEM Support for assistance\n");
2968 }
2969
2970 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2971 {
2972         int port = BP_PORT(bp);
2973         int reg_offset;
2974         u32 val, swap_val, swap_override;
2975
2976         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2977                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2978
2979         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2980
2981                 val = REG_RD(bp, reg_offset);
2982                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2983                 REG_WR(bp, reg_offset, val);
2984
2985                 BNX2X_ERR("SPIO5 hw attention\n");
2986
2987                 /* Fan failure attention */
2988                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2989                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2990                         /* Low power mode is controlled by GPIO 2 */
2991                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2992                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2993                         /* The PHY reset is controlled by GPIO 1 */
2994                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2995                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2996                         break;
2997
2998                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2999                         /* The PHY reset is controlled by GPIO 1 */
3000                         /* fake the port number to cancel the swap done in
3001                            set_gpio() */
3002                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3003                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3004                         port = (swap_val && swap_override) ^ 1;
3005                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3006                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007                         break;
3008
3009                 default:
3010                         break;
3011                 }
3012                 bnx2x_fan_failure(bp);
3013         }
3014
3015         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3016                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3017                 bnx2x_acquire_phy_lock(bp);
3018                 bnx2x_handle_module_detect_int(&bp->link_params);
3019                 bnx2x_release_phy_lock(bp);
3020         }
3021
3022         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3023
3024                 val = REG_RD(bp, reg_offset);
3025                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3026                 REG_WR(bp, reg_offset, val);
3027
3028                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3029                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3030                 bnx2x_panic();
3031         }
3032 }
3033
3034 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3035 {
3036         u32 val;
3037
3038         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3039
3040                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3041                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3042                 /* DORQ discard attention */
3043                 if (val & 0x2)
3044                         BNX2X_ERR("FATAL error from DORQ\n");
3045         }
3046
3047         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3048
3049                 int port = BP_PORT(bp);
3050                 int reg_offset;
3051
3052                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3053                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3054
3055                 val = REG_RD(bp, reg_offset);
3056                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3057                 REG_WR(bp, reg_offset, val);
3058
3059                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3060                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3061                 bnx2x_panic();
3062         }
3063 }
3064
3065 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3066 {
3067         u32 val;
3068
3069         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3070
3071                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3072                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3073                 /* CFC error attention */
3074                 if (val & 0x2)
3075                         BNX2X_ERR("FATAL error from CFC\n");
3076         }
3077
3078         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3079
3080                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3081                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3082                 /* RQ_USDMDP_FIFO_OVERFLOW */
3083                 if (val & 0x18000)
3084                         BNX2X_ERR("FATAL error from PXP\n");
3085         }
3086
3087         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3088
3089                 int port = BP_PORT(bp);
3090                 int reg_offset;
3091
3092                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3093                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3094
3095                 val = REG_RD(bp, reg_offset);
3096                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3097                 REG_WR(bp, reg_offset, val);
3098
3099                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3100                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3101                 bnx2x_panic();
3102         }
3103 }
3104
3105 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3106 {
3107         u32 val;
3108
3109         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3110
3111                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3112                         int func = BP_FUNC(bp);
3113
3114                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3115                         bp->mf_config = SHMEM_RD(bp,
3116                                            mf_cfg.func_mf_config[func].config);
3117                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3118                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3119                                 bnx2x_dcc_event(bp,
3120                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3121                         bnx2x__link_status_update(bp);
3122                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3123                                 bnx2x_pmf_update(bp);
3124
3125                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3126
3127                         BNX2X_ERR("MC assert!\n");
3128                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3129                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3130                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3131                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3132                         bnx2x_panic();
3133
3134                 } else if (attn & BNX2X_MCP_ASSERT) {
3135
3136                         BNX2X_ERR("MCP assert!\n");
3137                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3138                         bnx2x_fw_dump(bp);
3139
3140                 } else
3141                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3142         }
3143
3144         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3145                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3146                 if (attn & BNX2X_GRC_TIMEOUT) {
3147                         val = CHIP_IS_E1H(bp) ?
3148                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3149                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3150                 }
3151                 if (attn & BNX2X_GRC_RSV) {
3152                         val = CHIP_IS_E1H(bp) ?
3153                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3154                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3155                 }
3156                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3157         }
3158 }
3159
3160 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3162
3163
3164 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3165 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3166 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3168 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3169 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3170 /*
3171  * should be run under rtnl lock
3172  */
3173 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174 {
3175         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178         barrier();
3179         mmiowb();
3180 }
3181
3182 /*
3183  * should be run under rtnl lock
3184  */
3185 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186 {
3187         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188         val |= (1 << RESET_DONE_FLAG_SHIFT);
3189         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190         barrier();
3191         mmiowb();
3192 }
3193
3194 /*
3195  * should be run under rtnl lock
3196  */
3197 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3198 {
3199         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201         return !(val & RESET_DONE_FLAG_MASK);
3202 }
3203
3204 /*
3205  * should be run under rtnl lock
3206  */
3207 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208 {
3209         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215         barrier();
3216         mmiowb();
3217 }
3218
3219 /*
3220  * should be run under rtnl lock
3221  */
3222 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223 {
3224         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230         barrier();
3231         mmiowb();
3232
3233         return val1;
3234 }
3235
3236 /*
3237  * should be run under rtnl lock
3238  */
3239 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240 {
3241         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242 }
3243
3244 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245 {
3246         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248 }
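
/*
 * Layout of BNX2X_MISC_GEN_REG as used by the recovery flow above:
 * bits [15:0] hold the load counter (one count per loaded function)
 * and bit 16 is the "reset in progress" flag.  A value of 0x00010002,
 * for example, means two functions are loaded and a reset is in
 * progress.
 */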
3249
3250 static inline void _print_next_block(int idx, const char *blk)
3251 {
3252         if (idx)
3253                 pr_cont(", ");
3254         pr_cont("%s", blk);
3255 }
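
/*
 * Together with the helpers below this prints a comma separated list
 * on a single line, e.g. calls with par_num 0, 1, 2 yield
 * "BRB, PARSER, TSDM".
 */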
3256
3257 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258 {
3259         int i = 0;
3260         u32 cur_bit = 0;
3261         for (i = 0; sig; i++) {
3262                 cur_bit = ((u32)0x1 << i);
3263                 if (sig & cur_bit) {
3264                         switch (cur_bit) {
3265                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266                                 _print_next_block(par_num++, "BRB");
3267                                 break;
3268                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269                                 _print_next_block(par_num++, "PARSER");
3270                                 break;
3271                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272                                 _print_next_block(par_num++, "TSDM");
3273                                 break;
3274                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275                                 _print_next_block(par_num++, "SEARCHER");
3276                                 break;
3277                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278                                 _print_next_block(par_num++, "TSEMI");
3279                                 break;
3280                         }
3281
3282                         /* Clear the bit */
3283                         sig &= ~cur_bit;
3284                 }
3285         }
3286
3287         return par_num;
3288 }
3289
3290 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291 {
3292         int i = 0;
3293         u32 cur_bit = 0;
3294         for (i = 0; sig; i++) {
3295                 cur_bit = ((u32)0x1 << i);
3296                 if (sig & cur_bit) {
3297                         switch (cur_bit) {
3298                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299                                 _print_next_block(par_num++, "PBCLIENT");
3300                                 break;
3301                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302                                 _print_next_block(par_num++, "QM");
3303                                 break;
3304                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305                                 _print_next_block(par_num++, "XSDM");
3306                                 break;
3307                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308                                 _print_next_block(par_num++, "XSEMI");
3309                                 break;
3310                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311                                 _print_next_block(par_num++, "DOORBELLQ");
3312                                 break;
3313                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314                                 _print_next_block(par_num++, "VAUX PCI CORE");
3315                                 break;
3316                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317                                 _print_next_block(par_num++, "DEBUG");
3318                                 break;
3319                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320                                 _print_next_block(par_num++, "USDM");
3321                                 break;
3322                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323                                 _print_next_block(par_num++, "USEMI");
3324                                 break;
3325                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326                                 _print_next_block(par_num++, "UPB");
3327                                 break;
3328                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329                                 _print_next_block(par_num++, "CSDM");
3330                                 break;
3331                         }
3332
3333                         /* Clear the bit */
3334                         sig &= ~cur_bit;
3335                 }
3336         }
3337
3338         return par_num;
3339 }
3340
3341 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342 {
3343         int i = 0;
3344         u32 cur_bit = 0;
3345         for (i = 0; sig; i++) {
3346                 cur_bit = ((u32)0x1 << i);
3347                 if (sig & cur_bit) {
3348                         switch (cur_bit) {
3349                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350                                 _print_next_block(par_num++, "CSEMI");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "PXP");
3354                                 break;
3355                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356                                 _print_next_block(par_num++,
3357                                         "PXPPCICLOCKCLIENT");
3358                                 break;
3359                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360                                 _print_next_block(par_num++, "CFC");
3361                                 break;
3362                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363                                 _print_next_block(par_num++, "CDU");
3364                                 break;
3365                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366                                 _print_next_block(par_num++, "IGU");
3367                                 break;
3368                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369                                 _print_next_block(par_num++, "MISC");
3370                                 break;
3371                         }
3372
3373                         /* Clear the bit */
3374                         sig &= ~cur_bit;
3375                 }
3376         }
3377
3378         return par_num;
3379 }
3380
3381 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382 {
3383         int i = 0;
3384         u32 cur_bit = 0;
3385         for (i = 0; sig; i++) {
3386                 cur_bit = ((u32)0x1 << i);
3387                 if (sig & cur_bit) {
3388                         switch (cur_bit) {
3389                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390                                 _print_next_block(par_num++, "MCP ROM");
3391                                 break;
3392                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393                                 _print_next_block(par_num++, "MCP UMP RX");
3394                                 break;
3395                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396                                 _print_next_block(par_num++, "MCP UMP TX");
3397                                 break;
3398                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399                                 _print_next_block(par_num++, "MCP SCPAD");
3400                                 break;
3401                         }
3402
3403                         /* Clear the bit */
3404                         sig &= ~cur_bit;
3405                 }
3406         }
3407
3408         return par_num;
3409 }
3410
3411 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412                                      u32 sig2, u32 sig3)
3413 {
3414         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416                 int par_num = 0;
3417                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3418                         "[0]:0x%08x [1]:0x%08x "
3419                         "[2]:0x%08x [3]:0x%08x\n",
3420                           sig0 & HW_PRTY_ASSERT_SET_0,
3421                           sig1 & HW_PRTY_ASSERT_SET_1,
3422                           sig2 & HW_PRTY_ASSERT_SET_2,
3423                           sig3 & HW_PRTY_ASSERT_SET_3);
3424                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3425                        bp->dev->name);
3426                 par_num = bnx2x_print_blocks_with_parity0(
3427                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428                 par_num = bnx2x_print_blocks_with_parity1(
3429                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430                 par_num = bnx2x_print_blocks_with_parity2(
3431                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432                 par_num = bnx2x_print_blocks_with_parity3(
3433                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434                 pr_cont("\n");
3435                 return true;
3436         } else
3437                 return false;
3438 }
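
/*
 * Example output (illustrative, assuming the interface is named eth0)
 * for sig0 with the BRB parity bit set and sig1 with the QM parity
 * bit set:
 *
 *	eth0: Parity errors detected in blocks: BRB, QM
 */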
3439
3440 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3441 {
3442         struct attn_route attn;
3443         int port = BP_PORT(bp);
3444
3445         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3446         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3447         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3448         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3457
3458         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3459                                         attn.sig[3]);
3460 }
3461
3462 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3463 {
3464         struct attn_route attn, *group_mask;
3465         int port = BP_PORT(bp);
3466         int index;
3467         u32 reg_addr;
3468         u32 val;
3469         u32 aeu_mask;
3470
3471         /* need to take the HW lock because the MCP or the other port
3472            might also try to handle this event */
3473         bnx2x_acquire_alr(bp);
3474
3475         if (bnx2x_chk_parity_attn(bp)) {
3476                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3477                 bnx2x_set_reset_in_progress(bp);
3478                 schedule_delayed_work(&bp->reset_task, 0);
3479                 /* Disable HW interrupts */
3480                 bnx2x_int_disable(bp);
3481                 bnx2x_release_alr(bp);
3482                 /* In case of parity errors don't handle attentions so that
3483                  * the other function can still "see" the parity errors.
3484                  */
3485                 return;
3486         }
3487
3488         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3489         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3490         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3491         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3492         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3493            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3494
3495         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3496                 if (deasserted & (1 << index)) {
3497                         group_mask = &bp->attn_group[index];
3498
3499                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3500                            index, group_mask->sig[0], group_mask->sig[1],
3501                            group_mask->sig[2], group_mask->sig[3]);
3502
3503                         bnx2x_attn_int_deasserted3(bp,
3504                                         attn.sig[3] & group_mask->sig[3]);
3505                         bnx2x_attn_int_deasserted1(bp,
3506                                         attn.sig[1] & group_mask->sig[1]);
3507                         bnx2x_attn_int_deasserted2(bp,
3508                                         attn.sig[2] & group_mask->sig[2]);
3509                         bnx2x_attn_int_deasserted0(bp,
3510                                         attn.sig[0] & group_mask->sig[0]);
3511                 }
3512         }
3513
3514         bnx2x_release_alr(bp);
3515
3516         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3517
3518         val = ~deasserted;
3519         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3520            val, reg_addr);
3521         REG_WR(bp, reg_addr, val);
3522
3523         if (~bp->attn_state & deasserted)
3524                 BNX2X_ERR("IGU ERROR\n");
3525
3526         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3527                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3528
3529         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3530         aeu_mask = REG_RD(bp, reg_addr);
3531
3532         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3533            aeu_mask, deasserted);
3534         aeu_mask |= (deasserted & 0x3ff);
3535         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3536
3537         REG_WR(bp, reg_addr, aeu_mask);
3538         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3539
3540         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3541         bp->attn_state &= ~deasserted;
3542         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3543 }
3544
3545 static void bnx2x_attn_int(struct bnx2x *bp)
3546 {
3547         /* read local copy of bits */
3548         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3549                                                                 attn_bits);
3550         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3551                                                                 attn_bits_ack);
3552         u32 attn_state = bp->attn_state;
3553
3554         /* look for changed bits */
3555         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3556         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3557
3558         DP(NETIF_MSG_HW,
3559            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3560            attn_bits, attn_ack, asserted, deasserted);
3561
3562         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3563                 BNX2X_ERR("BAD attention state\n");
3564
3565         /* handle bits that were raised */
3566         if (asserted)
3567                 bnx2x_attn_int_asserted(bp, asserted);
3568
3569         if (deasserted)
3570                 bnx2x_attn_int_deasserted(bp, deasserted);
3571 }
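
/*
 * Worked example: attn_bits = 0x5, attn_ack = 0x1, attn_state = 0x1
 * gives asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (a newly raised bit) and
 * deasserted = ~0x5 & 0x1 & 0x1 = 0x0.  The "BAD attention state"
 * check fires for bits where attn_bits equals attn_ack yet differs
 * from attn_state.
 */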
3572
3573 static void bnx2x_sp_task(struct work_struct *work)
3574 {
3575         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3576         u16 status;
3577
3578         /* Return here if interrupt is disabled */
3579         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3580                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3581                 return;
3582         }
3583
3584         status = bnx2x_update_dsb_idx(bp);
3585 /*      if (status == 0)                                     */
3586 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3587
3588         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3589
3590         /* HW attentions */
3591         if (status & 0x1) {
3592                 bnx2x_attn_int(bp);
3593                 status &= ~0x1;
3594         }
3595
3596         /* CStorm events: STAT_QUERY */
3597         if (status & 0x2) {
3598                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3599                 status &= ~0x2;
3600         }
3601
3602         if (unlikely(status))
3603                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3604                    status);
3605
3606         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3607                      IGU_INT_NOP, 1);
3608         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3609                      IGU_INT_NOP, 1);
3610         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3611                      IGU_INT_NOP, 1);
3612         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3613                      IGU_INT_NOP, 1);
3614         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3615                      IGU_INT_ENABLE, 1);
3616 }
3617
3618 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3619 {
3620         struct net_device *dev = dev_instance;
3621         struct bnx2x *bp = netdev_priv(dev);
3622
3623         /* Return here if interrupt is disabled */
3624         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3625                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3626                 return IRQ_HANDLED;
3627         }
3628
3629         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3630
3631 #ifdef BNX2X_STOP_ON_ERROR
3632         if (unlikely(bp->panic))
3633                 return IRQ_HANDLED;
3634 #endif
3635
3636 #ifdef BCM_CNIC
3637         {
3638                 struct cnic_ops *c_ops;
3639
3640                 rcu_read_lock();
3641                 c_ops = rcu_dereference(bp->cnic_ops);
3642                 if (c_ops)
3643                         c_ops->cnic_handler(bp->cnic_data, NULL);
3644                 rcu_read_unlock();
3645         }
3646 #endif
3647         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3648
3649         return IRQ_HANDLED;
3650 }
3651
3652 /* end of slow path */
3653
3654 /* Statistics */
3655
3656 /****************************************************************************
3657 * Macros
3658 ****************************************************************************/
3659
3660 /* sum[hi:lo] += add[hi:lo] */
3661 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3662         do { \
3663                 s_lo += a_lo; \
3664                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3665         } while (0)
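
/*
 * Example: sum = 0x00000001_ffffffff plus add = 0x00000000_00000001:
 * s_lo wraps to 0, the (s_lo < a_lo) test detects the carry and s_hi
 * becomes 2, giving sum = 0x00000002_00000000.
 */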
3666
3667 /* difference = minuend - subtrahend */
3668 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3669         do { \
3670                 if (m_lo < s_lo) { \
3671                         /* underflow */ \
3672                         d_hi = m_hi - s_hi; \
3673                         if (d_hi > 0) { \
3674                                 /* we can borrow 1 */ \
3675                                 d_hi--; \
3676                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3677                         } else { \
3678                                 /* m_hi <= s_hi */ \
3679                                 d_hi = 0; \
3680                                 d_lo = 0; \
3681                         } \
3682                 } else { \
3683                         /* m_lo >= s_lo */ \
3684                         if (m_hi < s_hi) { \
3685                                 d_hi = 0; \
3686                                 d_lo = 0; \
3687                         } else { \
3688                                 /* m_hi >= s_hi */ \
3689                                 d_hi = m_hi - s_hi; \
3690                                 d_lo = m_lo - s_lo; \
3691                         } \
3692                 } \
3693         } while (0)
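
/*
 * Example: minuend 0x00000002_00000000 minus subtrahend
 * 0x00000001_ffffffff: m_lo < s_lo, d_hi starts at 2 - 1 = 1, one is
 * borrowed so d_hi becomes 0 and d_lo = 0 + (0xffffffff - 0xffffffff)
 * + 1 = 1.  A would-be negative difference clamps to 0.
 */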
3694
3695 #define UPDATE_STAT64(s, t) \
3696         do { \
3697                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3698                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3699                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3700                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3701                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3702                        pstats->mac_stx[1].t##_lo, diff.lo); \
3703         } while (0)
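
/*
 * mac_stx[0] keeps the latest raw MAC snapshot while mac_stx[1]
 * accumulates the per-interval differences, so the accumulated
 * statistics survive a reset of the MAC counters.
 */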
3704
3705 #define UPDATE_STAT64_NIG(s, t) \
3706         do { \
3707                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3708                         diff.lo, new->s##_lo, old->s##_lo); \
3709                 ADD_64(estats->t##_hi, diff.hi, \
3710                        estats->t##_lo, diff.lo); \
3711         } while (0)
3712
3713 /* sum[hi:lo] += add */
3714 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3715         do { \
3716                 s_lo += a; \
3717                 s_hi += (s_lo < a) ? 1 : 0; \
3718         } while (0)
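
/*
 * Example: s = 0x00000000_fffffff0 plus a = 0x20: s_lo wraps to 0x10,
 * 0x10 < 0x20 flags the carry and s becomes 0x00000001_00000010.
 */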
3719
3720 #define UPDATE_EXTEND_STAT(s) \
3721         do { \
3722                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3723                               pstats->mac_stx[1].s##_lo, \
3724                               new->s); \
3725         } while (0)
3726
3727 #define UPDATE_EXTEND_TSTAT(s, t) \
3728         do { \
3729                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3730                 old_tclient->s = tclient->s; \
3731                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732         } while (0)
3733
3734 #define UPDATE_EXTEND_USTAT(s, t) \
3735         do { \
3736                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3737                 old_uclient->s = uclient->s; \
3738                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739         } while (0)
3740
3741 #define UPDATE_EXTEND_XSTAT(s, t) \
3742         do { \
3743                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3744                 old_xclient->s = xclient->s; \
3745                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746         } while (0)
3747
3748 /* minuend -= subtrahend */
3749 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3750         do { \
3751                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3752         } while (0)
3753
3754 /* minuend[hi:lo] -= subtrahend */
3755 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3756         do { \
3757                 SUB_64(m_hi, 0, m_lo, s); \
3758         } while (0)
3759
3760 #define SUB_EXTEND_USTAT(s, t) \
3761         do { \
3762                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3763                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3764         } while (0)
3765
3766 /*
3767  * General service functions
3768  */
3769
3770 static inline long bnx2x_hilo(u32 *hiref)
3771 {
3772         u32 lo = *(hiref + 1);
3773 #if (BITS_PER_LONG == 64)
3774         u32 hi = *hiref;
3775
3776         return HILO_U64(hi, lo);
3777 #else
3778         return lo;
3779 #endif
3780 }
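
/*
 * Note: on 32-bit builds only the low 32 bits of the counter are
 * returned, so values above 0xffffffff are truncated.
 */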
3781
3782 /*
3783  * Init service functions
3784  */
3785
3786 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3787 {
3788         if (!bp->stats_pending) {
3789                 struct eth_query_ramrod_data ramrod_data = {0};
3790                 int i, rc;
3791
3792                 ramrod_data.drv_counter = bp->stats_counter++;
3793                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3794                 for_each_queue(bp, i)
3795                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3796
3797                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3798                                    ((u32 *)&ramrod_data)[1],
3799                                    ((u32 *)&ramrod_data)[0], 0);
3800                 if (rc == 0) {
3801                         /* the stats ramrod has its own slot on the spq */
3802                         bp->spq_left++;
3803                         bp->stats_pending = 1;
3804                 }
3805         }
3806 }
3807
3808 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3809 {
3810         struct dmae_command *dmae = &bp->stats_dmae;
3811         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3812
3813         *stats_comp = DMAE_COMP_VAL;
3814         if (CHIP_REV_IS_SLOW(bp))
3815                 return;
3816
3817         /* loader */
3818         if (bp->executer_idx) {
3819                 int loader_idx = PMF_DMAE_C(bp);
3820
3821                 memset(dmae, 0, sizeof(struct dmae_command));
3822
3823                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3824                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3825                                 DMAE_CMD_DST_RESET |
3826 #ifdef __BIG_ENDIAN
3827                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3828 #else
3829                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3830 #endif
3831                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3832                                                DMAE_CMD_PORT_0) |
3833                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3834                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3835                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3836                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3837                                      sizeof(struct dmae_command) *
3838                                      (loader_idx + 1)) >> 2;
3839                 dmae->dst_addr_hi = 0;
3840                 dmae->len = sizeof(struct dmae_command) >> 2;
3841                 if (CHIP_IS_E1(bp))
3842                         dmae->len--;
3843                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3844                 dmae->comp_addr_hi = 0;
3845                 dmae->comp_val = 1;
3846
3847                 *stats_comp = 0;
3848                 bnx2x_post_dmae(bp, dmae, loader_idx);
3849
3850         } else if (bp->func_stx) {
3851                 *stats_comp = 0;
3852                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3853         }
3854 }
3855
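/*
 * Polls the DMAE completion word for up to 10 msleep(1) iterations
 * and logs an error on timeout.  Note that it always returns 1.
 */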
3856 static int bnx2x_stats_comp(struct bnx2x *bp)
3857 {
3858         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3859         int cnt = 10;
3860
3861         might_sleep();
3862         while (*stats_comp != DMAE_COMP_VAL) {
3863                 if (!cnt) {
3864                         BNX2X_ERR("timeout waiting for stats to finish\n");
3865                         break;
3866                 }
3867                 cnt--;
3868                 msleep(1);
3869         }
3870         return 1;
3871 }
3872
3873 /*
3874  * Statistics service functions
3875  */
3876
3877 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3878 {
3879         struct dmae_command *dmae;
3880         u32 opcode;
3881         int loader_idx = PMF_DMAE_C(bp);
3882         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3883
3884         /* sanity */
3885         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3886                 BNX2X_ERR("BUG!\n");
3887                 return;
3888         }
3889
3890         bp->executer_idx = 0;
3891
3892         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3893                   DMAE_CMD_C_ENABLE |
3894                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3895 #ifdef __BIG_ENDIAN
3896                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3897 #else
3898                   DMAE_CMD_ENDIANITY_DW_SWAP |
3899 #endif
3900                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3901                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3902
3903         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3904         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3905         dmae->src_addr_lo = bp->port.port_stx >> 2;
3906         dmae->src_addr_hi = 0;
3907         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3908         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3909         dmae->len = DMAE_LEN32_RD_MAX;
3910         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3911         dmae->comp_addr_hi = 0;
3912         dmae->comp_val = 1;
3913
3914         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3915         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3916         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3917         dmae->src_addr_hi = 0;
3918         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3919                                    DMAE_LEN32_RD_MAX * 4);
3920         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3921                                    DMAE_LEN32_RD_MAX * 4);
3922         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3923         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3924         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3925         dmae->comp_val = DMAE_COMP_VAL;
3926
3927         *stats_comp = 0;
3928         bnx2x_hw_stats_post(bp);
3929         bnx2x_stats_comp(bp);
3930 }
3931
3932 static void bnx2x_port_stats_init(struct bnx2x *bp)
3933 {
3934         struct dmae_command *dmae;
3935         int port = BP_PORT(bp);
3936         int vn = BP_E1HVN(bp);
3937         u32 opcode;
3938         int loader_idx = PMF_DMAE_C(bp);
3939         u32 mac_addr;
3940         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3941
3942         /* sanity */
3943         if (!bp->link_vars.link_up || !bp->port.pmf) {
3944                 BNX2X_ERR("BUG!\n");
3945                 return;
3946         }
3947
3948         bp->executer_idx = 0;
3949
3950         /* MCP */
3951         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3952                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3953                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3954 #ifdef __BIG_ENDIAN
3955                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3956 #else
3957                   DMAE_CMD_ENDIANITY_DW_SWAP |
3958 #endif
3959                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3960                   (vn << DMAE_CMD_E1HVN_SHIFT));
3961
3962         if (bp->port.port_stx) {
3963
3964                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3965                 dmae->opcode = opcode;
3966                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3967                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3968                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3969                 dmae->dst_addr_hi = 0;
3970                 dmae->len = sizeof(struct host_port_stats) >> 2;
3971                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3972                 dmae->comp_addr_hi = 0;
3973                 dmae->comp_val = 1;
3974         }
3975
3976         if (bp->func_stx) {
3977
3978                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979                 dmae->opcode = opcode;
3980                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3981                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3982                 dmae->dst_addr_lo = bp->func_stx >> 2;
3983                 dmae->dst_addr_hi = 0;
3984                 dmae->len = sizeof(struct host_func_stats) >> 2;
3985                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986                 dmae->comp_addr_hi = 0;
3987                 dmae->comp_val = 1;
3988         }
3989
3990         /* MAC */
3991         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3992                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3993                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3994 #ifdef __BIG_ENDIAN
3995                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3996 #else
3997                   DMAE_CMD_ENDIANITY_DW_SWAP |
3998 #endif
3999                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4000                   (vn << DMAE_CMD_E1HVN_SHIFT));
4001
4002         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4003
4004                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4005                                    NIG_REG_INGRESS_BMAC0_MEM);
4006
4007                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4008                    BIGMAC_REGISTER_TX_STAT_GTBYT */
4009                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4010                 dmae->opcode = opcode;
4011                 dmae->src_addr_lo = (mac_addr +
4012                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4013                 dmae->src_addr_hi = 0;
4014                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4015                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4016                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4017                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4018                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4019                 dmae->comp_addr_hi = 0;
4020                 dmae->comp_val = 1;
4021
4022                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4023                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
4024                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4025                 dmae->opcode = opcode;
4026                 dmae->src_addr_lo = (mac_addr +
4027                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028                 dmae->src_addr_hi = 0;
4029                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4030                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4031                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4032                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4033                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4034                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4035                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4036                 dmae->comp_addr_hi = 0;
4037                 dmae->comp_val = 1;
4038
4039         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4040
4041                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4042
4043                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4044                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4045                 dmae->opcode = opcode;
4046                 dmae->src_addr_lo = (mac_addr +
4047                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4048                 dmae->src_addr_hi = 0;
4049                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4050                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4051                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4052                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4053                 dmae->comp_addr_hi = 0;
4054                 dmae->comp_val = 1;
4055
4056                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4057                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4058                 dmae->opcode = opcode;
4059                 dmae->src_addr_lo = (mac_addr +
4060                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4061                 dmae->src_addr_hi = 0;
4062                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4063                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4064                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4065                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4066                 dmae->len = 1;
4067                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4068                 dmae->comp_addr_hi = 0;
4069                 dmae->comp_val = 1;
4070
4071                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4072                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4073                 dmae->opcode = opcode;
4074                 dmae->src_addr_lo = (mac_addr +
4075                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4076                 dmae->src_addr_hi = 0;
4077                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4078                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4079                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4080                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4081                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4082                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4083                 dmae->comp_addr_hi = 0;
4084                 dmae->comp_val = 1;
4085         }
4086
4087         /* NIG */
4088         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4089         dmae->opcode = opcode;
4090         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4091                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4092         dmae->src_addr_hi = 0;
4093         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4094         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4095         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4096         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097         dmae->comp_addr_hi = 0;
4098         dmae->comp_val = 1;
4099
4100         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101         dmae->opcode = opcode;
4102         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4103                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4104         dmae->src_addr_hi = 0;
4105         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4106                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4107         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4108                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4109         dmae->len = (2*sizeof(u32)) >> 2;
4110         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111         dmae->comp_addr_hi = 0;
4112         dmae->comp_val = 1;
4113
4114         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4116                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4117                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4118 #ifdef __BIG_ENDIAN
4119                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4120 #else
4121                         DMAE_CMD_ENDIANITY_DW_SWAP |
4122 #endif
4123                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4124                         (vn << DMAE_CMD_E1HVN_SHIFT));
4125         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4126                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4127         dmae->src_addr_hi = 0;
4128         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4129                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4130         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4131                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4132         dmae->len = (2*sizeof(u32)) >> 2;
4133         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4134         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4135         dmae->comp_val = DMAE_COMP_VAL;
4136
4137         *stats_comp = 0;
4138 }
4139
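/*
 * Set up the single DMAE command that copies this function's
 * host_func_stats block from host memory into the per-function
 * statistics area in device memory (bp->func_stx).  The command
 * signals completion by writing DMAE_COMP_VAL into stats_comp,
 * which is cleared here and later waited on by bnx2x_stats_comp().
 */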
4140 static void bnx2x_func_stats_init(struct bnx2x *bp)
4141 {
4142         struct dmae_command *dmae = &bp->stats_dmae;
4143         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4144
4145         /* sanity */
4146         if (!bp->func_stx) {
4147                 BNX2X_ERR("BUG! func_stx not set\n");
4148                 return;
4149         }
4150
4151         bp->executer_idx = 0;
4152         memset(dmae, 0, sizeof(struct dmae_command));
4153
4154         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4155                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4156                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4157 #ifdef __BIG_ENDIAN
4158                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4159 #else
4160                         DMAE_CMD_ENDIANITY_DW_SWAP |
4161 #endif
4162                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4163                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4164         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4165         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4166         dmae->dst_addr_lo = bp->func_stx >> 2;
4167         dmae->dst_addr_hi = 0;
4168         dmae->len = sizeof(struct host_func_stats) >> 2;
4169         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4170         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4171         dmae->comp_val = DMAE_COMP_VAL;
4172
4173         *stats_comp = 0;
4174 }
4175
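/*
 * (Re)arm statistics collection: the port management function (PMF)
 * rebuilds the full port DMAE chain, while a non-PMF function only
 * programs its own function-stats transfer.  Either way, both the HW
 * (DMAE) and storm (firmware) queries are then posted.
 */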
4176 static void bnx2x_stats_start(struct bnx2x *bp)
4177 {
4178         if (bp->port.pmf)
4179                 bnx2x_port_stats_init(bp);
4180
4181         else if (bp->func_stx)
4182                 bnx2x_func_stats_init(bp);
4183
4184         bnx2x_hw_stats_post(bp);
4185         bnx2x_storm_stats_post(bp);
4186 }
4187
4188 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4189 {
4190         bnx2x_stats_comp(bp);
4191         bnx2x_stats_pmf_update(bp);
4192         bnx2x_stats_start(bp);
4193 }
4194
4195 static void bnx2x_stats_restart(struct bnx2x *bp)
4196 {
4197         bnx2x_stats_comp(bp);
4198         bnx2x_stats_start(bp);
4199 }
4200
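/*
 * Fold the freshly DMAE'd BMAC counters into the cumulative port
 * statistics.  UPDATE_STAT64 (defined earlier in this file) roughly
 * expands to
 *
 *	DIFF_64(diff, new->s, mac_stx[0].t);
 *	mac_stx[0].t = new->s;
 *	ADD_64(mac_stx[1].t, diff);
 *
 * so mac_stx[0] holds the previous raw snapshot and mac_stx[1] the
 * running 64-bit totals; the anonymous "diff" pair below is the
 * scratch space those macros use.
 */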
4201 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4202 {
4203         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4204         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4205         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4206         struct {
4207                 u32 lo;
4208                 u32 hi;
4209         } diff;
4210
4211         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4212         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4213         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4214         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4215         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4216         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4217         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4218         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4219         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4220         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4221         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4222         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4223         UPDATE_STAT64(tx_stat_gt127,
4224                                 tx_stat_etherstatspkts65octetsto127octets);
4225         UPDATE_STAT64(tx_stat_gt255,
4226                                 tx_stat_etherstatspkts128octetsto255octets);
4227         UPDATE_STAT64(tx_stat_gt511,
4228                                 tx_stat_etherstatspkts256octetsto511octets);
4229         UPDATE_STAT64(tx_stat_gt1023,
4230                                 tx_stat_etherstatspkts512octetsto1023octets);
4231         UPDATE_STAT64(tx_stat_gt1518,
4232                                 tx_stat_etherstatspkts1024octetsto1522octets);
4233         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4234         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4235         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4236         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4237         UPDATE_STAT64(tx_stat_gterr,
4238                                 tx_stat_dot3statsinternalmactransmiterrors);
4239         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4240
4241         estats->pause_frames_received_hi =
4242                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4243         estats->pause_frames_received_lo =
4244                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4245
4246         estats->pause_frames_sent_hi =
4247                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4248         estats->pause_frames_sent_lo =
4249                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4250 }
4251
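/*
 * EMAC counterpart of the above: the EMAC exposes plain 32-bit
 * counters, so UPDATE_EXTEND_STAT widens each delta into the 64-bit
 * hi/lo pair kept in mac_stx[1].
 */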
4252 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4253 {
4254         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4255         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4256         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4257
4258         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4259         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4260         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4261         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4262         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4263         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4264         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4265         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4266         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4267         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4268         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4269         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4270         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4271         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4272         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4273         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4274         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4275         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4276         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4277         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4278         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4279         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4280         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4281         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4282         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4283         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4284         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4285         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4286         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4287         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4288         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4289
4290         estats->pause_frames_received_hi =
4291                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4292         estats->pause_frames_received_lo =
4293                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4294         ADD_64(estats->pause_frames_received_hi,
4295                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4296                estats->pause_frames_received_lo,
4297                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4298
4299         estats->pause_frames_sent_hi =
4300                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
4301         estats->pause_frames_sent_lo =
4302                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
4303         ADD_64(estats->pause_frames_sent_hi,
4304                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4305                estats->pause_frames_sent_lo,
4306                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4307 }
4308
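/*
 * Post-process the hardware DMAE results: dispatch to the updater for
 * whichever MAC is active, then extend the NIG BRB discard/truncate
 * deltas and the two NIG egress packet counters into the port stats.
 * Returns non-zero only if no MAC was active, which should not happen
 * while the link is up.
 */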
4309 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4310 {
4311         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4312         struct nig_stats *old = &(bp->port.old_nig_stats);
4313         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4314         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4315         struct {
4316                 u32 lo;
4317                 u32 hi;
4318         } diff;
4319
4320         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4321                 bnx2x_bmac_stats_update(bp);
4322
4323         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4324                 bnx2x_emac_stats_update(bp);
4325
4326         else { /* unreachable: DMAE stats with no active MAC */
4327                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4328                 return -1;
4329         }
4330
4331         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4332                       new->brb_discard - old->brb_discard);
4333         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4334                       new->brb_truncate - old->brb_truncate);
4335
4336         UPDATE_STAT64_NIG(egress_mac_pkt0,
4337                                         etherstatspkts1024octetsto1522octets);
4338         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4339
4340         memcpy(old, new, sizeof(struct nig_stats));
4341
4342         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4343                sizeof(struct mac_stx));
4344         estats->brb_drop_hi = pstats->brb_drop_hi;
4345         estats->brb_drop_lo = pstats->brb_drop_lo;
4346
4347         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4348
4349         if (!BP_NOMCP(bp)) {
4350                 u32 nig_timer_max =
4351                         SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4352                 if (nig_timer_max != estats->nig_timer_max) {
4353                         estats->nig_timer_max = nig_timer_max;
4354                         BNX2X_ERR("NIG timer max (%u)\n",
4355                                   estats->nig_timer_max);
4356                 }
4357         }
4358
4359         return 0;
4360 }
4361
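/*
 * Merge the per-client statistics reported by the T/U/X storm firmware
 * into per-queue (qstats) and per-function (fstats) totals.  A snapshot
 * is accepted only if every storm's stats_counter is exactly one behind
 * bp->stats_counter, i.e. the firmware has answered the latest query;
 * otherwise the update is abandoned and retried on the next tick.
 */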
4362 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4363 {
4364         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4365         struct tstorm_per_port_stats *tport =
4366                                         &stats->tstorm_common.port_statistics;
4367         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4368         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4369         int i;
4370
4371         memcpy(&(fstats->total_bytes_received_hi),
4372                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4373                sizeof(struct host_func_stats) - 2*sizeof(u32));
4374         estats->error_bytes_received_hi = 0;
4375         estats->error_bytes_received_lo = 0;
4376         estats->etherstatsoverrsizepkts_hi = 0;
4377         estats->etherstatsoverrsizepkts_lo = 0;
4378         estats->no_buff_discard_hi = 0;
4379         estats->no_buff_discard_lo = 0;
4380
4381         for_each_queue(bp, i) {
4382                 struct bnx2x_fastpath *fp = &bp->fp[i];
4383                 int cl_id = fp->cl_id;
4384                 struct tstorm_per_client_stats *tclient =
4385                                 &stats->tstorm_common.client_statistics[cl_id];
4386                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4387                 struct ustorm_per_client_stats *uclient =
4388                                 &stats->ustorm_common.client_statistics[cl_id];
4389                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4390                 struct xstorm_per_client_stats *xclient =
4391                                 &stats->xstorm_common.client_statistics[cl_id];
4392                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4393                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4394                 u32 diff;
4395
4396                 /* are storm stats valid? */
4397                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4398                                                         bp->stats_counter) {
4399                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4400                            "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
4401                            i, xclient->stats_counter, bp->stats_counter);
4402                         return -1;
4403                 }
4404                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4405                                                         bp->stats_counter) {
4406                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4407                            "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
4408                            i, tclient->stats_counter, bp->stats_counter);
4409                         return -2;
4410                 }
4411                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4412                                                         bp->stats_counter) {
4413                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4414                            "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
4415                            i, uclient->stats_counter, bp->stats_counter);
4416                         return -4;
4417                 }
4418
4419                 qstats->total_bytes_received_hi =
4420                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4421                 qstats->total_bytes_received_lo =
4422                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4423
4424                 ADD_64(qstats->total_bytes_received_hi,
4425                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4426                        qstats->total_bytes_received_lo,
4427                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4428
4429                 ADD_64(qstats->total_bytes_received_hi,
4430                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4431                        qstats->total_bytes_received_lo,
4432                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4433
4434                 SUB_64(qstats->total_bytes_received_hi,
4435                        le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4436                        qstats->total_bytes_received_lo,
4437                        le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4438
4439                 SUB_64(qstats->total_bytes_received_hi,
4440                        le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4441                        qstats->total_bytes_received_lo,
4442                        le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4443
4444                 SUB_64(qstats->total_bytes_received_hi,
4445                        le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4446                        qstats->total_bytes_received_lo,
4447                        le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4448
4449                 qstats->valid_bytes_received_hi =
4450                                         qstats->total_bytes_received_hi;
4451                 qstats->valid_bytes_received_lo =
4452                                         qstats->total_bytes_received_lo;
4453
4454                 qstats->error_bytes_received_hi =
4455                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4456                 qstats->error_bytes_received_lo =
4457                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4458
4459                 ADD_64(qstats->total_bytes_received_hi,
4460                        qstats->error_bytes_received_hi,
4461                        qstats->total_bytes_received_lo,
4462                        qstats->error_bytes_received_lo);
4463
4464                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4465                                         total_unicast_packets_received);
4466                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4467                                         total_multicast_packets_received);
4468                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4469                                         total_broadcast_packets_received);
4470                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4471                                         etherstatsoverrsizepkts);
4472                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4473
4474                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4475                                         total_unicast_packets_received);
4476                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4477                                         total_multicast_packets_received);
4478                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4479                                         total_broadcast_packets_received);
4480                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4481                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4482                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4483
4484                 qstats->total_bytes_transmitted_hi =
4485                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4486                 qstats->total_bytes_transmitted_lo =
4487                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4488
4489                 ADD_64(qstats->total_bytes_transmitted_hi,
4490                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4491                        qstats->total_bytes_transmitted_lo,
4492                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4493
4494                 ADD_64(qstats->total_bytes_transmitted_hi,
4495                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4496                        qstats->total_bytes_transmitted_lo,
4497                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4498
4499                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4500                                         total_unicast_packets_transmitted);
4501                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4502                                         total_multicast_packets_transmitted);
4503                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4504                                         total_broadcast_packets_transmitted);
4505
4506                 old_tclient->checksum_discard = tclient->checksum_discard;
4507                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4508
4509                 ADD_64(fstats->total_bytes_received_hi,
4510                        qstats->total_bytes_received_hi,
4511                        fstats->total_bytes_received_lo,
4512                        qstats->total_bytes_received_lo);
4513                 ADD_64(fstats->total_bytes_transmitted_hi,
4514                        qstats->total_bytes_transmitted_hi,
4515                        fstats->total_bytes_transmitted_lo,
4516                        qstats->total_bytes_transmitted_lo);
4517                 ADD_64(fstats->total_unicast_packets_received_hi,
4518                        qstats->total_unicast_packets_received_hi,
4519                        fstats->total_unicast_packets_received_lo,
4520                        qstats->total_unicast_packets_received_lo);
4521                 ADD_64(fstats->total_multicast_packets_received_hi,
4522                        qstats->total_multicast_packets_received_hi,
4523                        fstats->total_multicast_packets_received_lo,
4524                        qstats->total_multicast_packets_received_lo);
4525                 ADD_64(fstats->total_broadcast_packets_received_hi,
4526                        qstats->total_broadcast_packets_received_hi,
4527                        fstats->total_broadcast_packets_received_lo,
4528                        qstats->total_broadcast_packets_received_lo);
4529                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4530                        qstats->total_unicast_packets_transmitted_hi,
4531                        fstats->total_unicast_packets_transmitted_lo,
4532                        qstats->total_unicast_packets_transmitted_lo);
4533                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4534                        qstats->total_multicast_packets_transmitted_hi,
4535                        fstats->total_multicast_packets_transmitted_lo,
4536                        qstats->total_multicast_packets_transmitted_lo);
4537                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4538                        qstats->total_broadcast_packets_transmitted_hi,
4539                        fstats->total_broadcast_packets_transmitted_lo,
4540                        qstats->total_broadcast_packets_transmitted_lo);
4541                 ADD_64(fstats->valid_bytes_received_hi,
4542                        qstats->valid_bytes_received_hi,
4543                        fstats->valid_bytes_received_lo,
4544                        qstats->valid_bytes_received_lo);
4545
4546                 ADD_64(estats->error_bytes_received_hi,
4547                        qstats->error_bytes_received_hi,
4548                        estats->error_bytes_received_lo,
4549                        qstats->error_bytes_received_lo);
4550                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4551                        qstats->etherstatsoverrsizepkts_hi,
4552                        estats->etherstatsoverrsizepkts_lo,
4553                        qstats->etherstatsoverrsizepkts_lo);
4554                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4555                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4556         }
4557
4558         ADD_64(fstats->total_bytes_received_hi,
4559                estats->rx_stat_ifhcinbadoctets_hi,
4560                fstats->total_bytes_received_lo,
4561                estats->rx_stat_ifhcinbadoctets_lo);
4562
4563         memcpy(estats, &(fstats->total_bytes_received_hi),
4564                sizeof(struct host_func_stats) - 2*sizeof(u32));
4565
4566         ADD_64(estats->etherstatsoverrsizepkts_hi,
4567                estats->rx_stat_dot3statsframestoolong_hi,
4568                estats->etherstatsoverrsizepkts_lo,
4569                estats->rx_stat_dot3statsframestoolong_lo);
4570         ADD_64(estats->error_bytes_received_hi,
4571                estats->rx_stat_ifhcinbadoctets_hi,
4572                estats->error_bytes_received_lo,
4573                estats->rx_stat_ifhcinbadoctets_lo);
4574
4575         if (bp->port.pmf) {
4576                 estats->mac_filter_discard =
4577                                 le32_to_cpu(tport->mac_filter_discard);
4578                 estats->xxoverflow_discard =
4579                                 le32_to_cpu(tport->xxoverflow_discard);
4580                 estats->brb_truncate_discard =
4581                                 le32_to_cpu(tport->brb_truncate_discard);
4582                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4583         }
4584
4585         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4586
4587         bp->stats_pending = 0;
4588
4589         return 0;
4590 }
4591
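/*
 * Translate the driver's hi/lo counters into the generic
 * net_device_stats fields reported to the stack.  bnx2x_hilo()
 * collapses a hi/lo pair into a single long (only the low 32 bits on
 * 32-bit kernels).
 */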
4592 static void bnx2x_net_stats_update(struct bnx2x *bp)
4593 {
4594         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4595         struct net_device_stats *nstats = &bp->dev->stats;
4596         int i;
4597
4598         nstats->rx_packets =
4599                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4600                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4601                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4602
4603         nstats->tx_packets =
4604                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4605                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4606                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4607
4608         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4609
4610         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4611
4612         nstats->rx_dropped = estats->mac_discard;
4613         for_each_queue(bp, i)
4614                 nstats->rx_dropped +=
4615                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4616
4617         nstats->tx_dropped = 0;
4618
4619         nstats->multicast =
4620                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4621
4622         nstats->collisions =
4623                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4624
4625         nstats->rx_length_errors =
4626                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4627                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4628         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4629                                  bnx2x_hilo(&estats->brb_truncate_hi);
4630         nstats->rx_crc_errors =
4631                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4632         nstats->rx_frame_errors =
4633                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4634         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4635         nstats->rx_missed_errors = estats->xxoverflow_discard;
4636
4637         nstats->rx_errors = nstats->rx_length_errors +
4638                             nstats->rx_over_errors +
4639                             nstats->rx_crc_errors +
4640                             nstats->rx_frame_errors +
4641                             nstats->rx_fifo_errors +
4642                             nstats->rx_missed_errors;
4643
4644         nstats->tx_aborted_errors =
4645                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4646                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4647         nstats->tx_carrier_errors =
4648                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4649         nstats->tx_fifo_errors = 0;
4650         nstats->tx_heartbeat_errors = 0;
4651         nstats->tx_window_errors = 0;
4652
4653         nstats->tx_errors = nstats->tx_aborted_errors +
4654                             nstats->tx_carrier_errors +
4655             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4656 }
4657
4658 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4659 {
4660         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4661         int i;
4662
4663         estats->driver_xoff = 0;
4664         estats->rx_err_discard_pkt = 0;
4665         estats->rx_skb_alloc_failed = 0;
4666         estats->hw_csum_err = 0;
4667         for_each_queue(bp, i) {
4668                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4669
4670                 estats->driver_xoff += qstats->driver_xoff;
4671                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4672                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4673                 estats->hw_csum_err += qstats->hw_csum_err;
4674         }
4675 }
4676
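/*
 * STATS_EVENT_UPDATE handler for the ENABLED state: bail out if the
 * previous DMAE pass has not completed yet, fold in the HW and storm
 * results, refresh the net/driver counters, optionally dump per-queue
 * debug state, and finally post the next round of queries.
 */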
4677 static void bnx2x_stats_update(struct bnx2x *bp)
4678 {
4679         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4680
4681         if (*stats_comp != DMAE_COMP_VAL)
4682                 return;
4683
4684         if (bp->port.pmf)
4685                 bnx2x_hw_stats_update(bp);
4686
4687         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4688                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4689                 bnx2x_panic();
4690                 return;
4691         }
4692
4693         bnx2x_net_stats_update(bp);
4694         bnx2x_drv_stats_update(bp);
4695
4696         if (netif_msg_timer(bp)) {
4697                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4698                 int i;
4699
4700                 printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
4701                        bp->dev->name,
4702                        estats->brb_drop_lo, estats->brb_truncate_lo);
4703
4704                 for_each_queue(bp, i) {
4705                         struct bnx2x_fastpath *fp = &bp->fp[i];
4706                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4707
4708                         printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
4709                                           "  rx pkt(%lu)  rx calls(%lu %lu)\n",
4710                                fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4711                                fp->rx_comp_cons),
4712                                le16_to_cpu(*fp->rx_cons_sb),
4713                                bnx2x_hilo(&qstats->
4714                                           total_unicast_packets_received_hi),
4715                                fp->rx_calls, fp->rx_pkt);
4716                 }
4717
4718                 for_each_queue(bp, i) {
4719                         struct bnx2x_fastpath *fp = &bp->fp[i];
4720                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4721                         struct netdev_queue *txq =
4722                                 netdev_get_tx_queue(bp->dev, i);
4723
4724                         printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
4725                                           "  tx pkt(%lu) tx calls (%lu)"
4726                                           "  %s (Xoff events %u)\n",
4727                                fp->name, bnx2x_tx_avail(fp),
4728                                le16_to_cpu(*fp->tx_cons_sb),
4729                                bnx2x_hilo(&qstats->
4730                                           total_unicast_packets_transmitted_hi),
4731                                fp->tx_pkt,
4732                                (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4733                                qstats->driver_xoff);
4734                 }
4735         }
4736
4737         bnx2x_hw_stats_post(bp);
4738         bnx2x_storm_stats_post(bp);
4739 }
4740
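/*
 * Final statistics flush on the way down: write the port (and, when
 * present, function) stats blocks back to device memory so management
 * firmware keeps a consistent view.  With both blocks present, the
 * port command's completion write targets one of the DMAE GO
 * registers, which launches the function command; only the last
 * command in the chain reports through stats_comp.
 */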
4741 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4742 {
4743         struct dmae_command *dmae;
4744         u32 opcode;
4745         int loader_idx = PMF_DMAE_C(bp);
4746         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4747
4748         bp->executer_idx = 0;
4749
4750         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4751                   DMAE_CMD_C_ENABLE |
4752                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4753 #ifdef __BIG_ENDIAN
4754                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4755 #else
4756                   DMAE_CMD_ENDIANITY_DW_SWAP |
4757 #endif
4758                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4759                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4760
4761         if (bp->port.port_stx) {
4762
4763                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4764                 if (bp->func_stx)
4765                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4766                 else
4767                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4768                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4769                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4770                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4771                 dmae->dst_addr_hi = 0;
4772                 dmae->len = sizeof(struct host_port_stats) >> 2;
4773                 if (bp->func_stx) {
4774                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4775                         dmae->comp_addr_hi = 0;
4776                         dmae->comp_val = 1;
4777                 } else {
4778                         dmae->comp_addr_lo =
4779                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4780                         dmae->comp_addr_hi =
4781                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4782                         dmae->comp_val = DMAE_COMP_VAL;
4783
4784                         *stats_comp = 0;
4785                 }
4786         }
4787
4788         if (bp->func_stx) {
4789
4790                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4791                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4792                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4793                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4794                 dmae->dst_addr_lo = bp->func_stx >> 2;
4795                 dmae->dst_addr_hi = 0;
4796                 dmae->len = sizeof(struct host_func_stats) >> 2;
4797                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4798                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4799                 dmae->comp_val = DMAE_COMP_VAL;
4800
4801                 *stats_comp = 0;
4802         }
4803 }
4804
4805 static void bnx2x_stats_stop(struct bnx2x *bp)
4806 {
4807         int update = 0;
4808
4809         bnx2x_stats_comp(bp);
4810
4811         if (bp->port.pmf)
4812                 update = (bnx2x_hw_stats_update(bp) == 0);
4813
4814         update |= (bnx2x_storm_stats_update(bp) == 0);
4815
4816         if (update) {
4817                 bnx2x_net_stats_update(bp);
4818
4819                 if (bp->port.pmf)
4820                         bnx2x_port_stats_stop(bp);
4821
4822                 bnx2x_hw_stats_post(bp);
4823                 bnx2x_stats_comp(bp);
4824         }
4825 }
4826
4827 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4828 {
4829 }
4830
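/*
 * Statistics state machine: indexed by [current state][event], each
 * entry gives the action to execute and the next state to enter.
 */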
4831 static const struct {
4832         void (*action)(struct bnx2x *bp);
4833         enum bnx2x_stats_state next_state;
4834 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4835 /* state        event   */
4836 {
4837 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4838 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4839 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4840 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4841 },
4842 {
4843 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4844 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4845 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4846 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4847 }
4848 };
4849
4850 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4851 {
4852         enum bnx2x_stats_state state = bp->stats_state;
4853
4854         if (unlikely(bp->panic))
4855                 return;
4856
4857         bnx2x_stats_stm[state][event].action(bp);
4858         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4859
4860         /* Make sure the stats_state change is visible before going on */
4861         smp_wmb();
4862
4863         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4864                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4865                    state, event, bp->stats_state);
4866 }
4867
4868 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4869 {
4870         struct dmae_command *dmae;
4871         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4872
4873         /* sanity */
4874         if (!bp->port.pmf || !bp->port.port_stx) {
4875                 BNX2X_ERR("BUG! not PMF or port_stx not set\n");
4876                 return;
4877         }
4878
4879         bp->executer_idx = 0;
4880
4881         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4882         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4883                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4884                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4885 #ifdef __BIG_ENDIAN
4886                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4887 #else
4888                         DMAE_CMD_ENDIANITY_DW_SWAP |
4889 #endif
4890                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4891                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4892         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4893         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4894         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4895         dmae->dst_addr_hi = 0;
4896         dmae->len = sizeof(struct host_port_stats) >> 2;
4897         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4898         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4899         dmae->comp_val = DMAE_COMP_VAL;
4900
4901         *stats_comp = 0;
4902         bnx2x_hw_stats_post(bp);
4903         bnx2x_stats_comp(bp);
4904 }
4905
4906 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4907 {
4908         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4909         int port = BP_PORT(bp);
4910         int func;
4911         u32 func_stx;
4912
4913         /* sanity */
4914         if (!bp->port.pmf || !bp->func_stx) {
4915                 BNX2X_ERR("BUG! not PMF or func_stx not set\n");
4916                 return;
4917         }
4918
4919         /* save our func_stx */
4920         func_stx = bp->func_stx;
4921
4922         for (vn = VN_0; vn < vn_max; vn++) {
4923                 func = 2*vn + port;
4924
4925                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4926                 bnx2x_func_stats_init(bp);
4927                 bnx2x_hw_stats_post(bp);
4928                 bnx2x_stats_comp(bp);
4929         }
4930
4931         /* restore our func_stx */
4932         bp->func_stx = func_stx;
4933 }
4934
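/*
 * Non-PMF path on load: DMA the function stats currently held in
 * device memory back into func_stats_base, so that subsequent updates
 * accumulate on top of whatever was already reported (e.g. by a
 * previous driver instance).
 */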
4935 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4936 {
4937         struct dmae_command *dmae = &bp->stats_dmae;
4938         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4939
4940         /* sanity */
4941         if (!bp->func_stx) {
4942                 BNX2X_ERR("BUG! func_stx not set\n");
4943                 return;
4944         }
4945
4946         bp->executer_idx = 0;
4947         memset(dmae, 0, sizeof(struct dmae_command));
4948
4949         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4950                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4951                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4952 #ifdef __BIG_ENDIAN
4953                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4954 #else
4955                         DMAE_CMD_ENDIANITY_DW_SWAP |
4956 #endif
4957                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4958                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4959         dmae->src_addr_lo = bp->func_stx >> 2;
4960         dmae->src_addr_hi = 0;
4961         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4962         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4963         dmae->len = sizeof(struct host_func_stats) >> 2;
4964         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4965         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4966         dmae->comp_val = DMAE_COMP_VAL;
4967
4968         *stats_comp = 0;
4969         bnx2x_hw_stats_post(bp);
4970         bnx2x_stats_comp(bp);
4971 }
4972
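/*
 * One-time statistics setup on device load: read the port/function
 * stats addresses from shared memory, snapshot the NIG counters that
 * are only meaningful as deltas, zero all software state, and seed the
 * base statistics blocks according to the PMF role.
 */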
4973 static void bnx2x_stats_init(struct bnx2x *bp)
4974 {
4975         int port = BP_PORT(bp);
4976         int func = BP_FUNC(bp);
4977         int i;
4978
4979         bp->stats_pending = 0;
4980         bp->executer_idx = 0;
4981         bp->stats_counter = 0;
4982
4983         /* port and func stats for management */
4984         if (!BP_NOMCP(bp)) {
4985                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4986                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4987
4988         } else {
4989                 bp->port.port_stx = 0;
4990                 bp->func_stx = 0;
4991         }
4992         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4993            bp->port.port_stx, bp->func_stx);
4994
4995         /* port stats */
4996         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4997         bp->port.old_nig_stats.brb_discard =
4998                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4999         bp->port.old_nig_stats.brb_truncate =
5000                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5001         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5002                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5003         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5004                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5005
5006         /* function stats */
5007         for_each_queue(bp, i) {
5008                 struct bnx2x_fastpath *fp = &bp->fp[i];
5009
5010                 memset(&fp->old_tclient, 0,
5011                        sizeof(struct tstorm_per_client_stats));
5012                 memset(&fp->old_uclient, 0,
5013                        sizeof(struct ustorm_per_client_stats));
5014                 memset(&fp->old_xclient, 0,
5015                        sizeof(struct xstorm_per_client_stats));
5016                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5017         }
5018
5019         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5020         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5021
5022         bp->stats_state = STATS_STATE_DISABLED;
5023
5024         if (bp->port.pmf) {
5025                 if (bp->port.port_stx)
5026                         bnx2x_port_stats_base_init(bp);
5027
5028                 if (bp->func_stx)
5029                         bnx2x_func_stats_base_init(bp);
5030
5031         } else if (bp->func_stx)
5032                 bnx2x_func_stats_base_update(bp);
5033 }
5034
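/*
 * Periodic driver timer: in poll mode service queue 0 directly,
 * exchange the driver/MCP heartbeat pulses, and kick a statistics
 * update whenever the device is in the OPEN state.
 */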
5035 static void bnx2x_timer(unsigned long data)
5036 {
5037         struct bnx2x *bp = (struct bnx2x *) data;
5038
5039         if (!netif_running(bp->dev))
5040                 return;
5041
5042         if (atomic_read(&bp->intr_sem) != 0)
5043                 goto timer_restart;
5044
5045         if (poll) {
5046                 struct bnx2x_fastpath *fp = &bp->fp[0];
5047                 int rc;
5048
5049                 bnx2x_tx_int(fp);
5050                 rc = bnx2x_rx_int(fp, 1000);
5051         }
5052
5053         if (!BP_NOMCP(bp)) {
5054                 int func = BP_FUNC(bp);
5055                 u32 drv_pulse;
5056                 u32 mcp_pulse;
5057
5058                 ++bp->fw_drv_pulse_wr_seq;
5059                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5060                 /* TBD - add SYSTEM_TIME */
5061                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5062                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5063
5064                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5065                              MCP_PULSE_SEQ_MASK);
5066                 /* The delta between driver pulse and mcp response
5067                  * should be 1 (before mcp response) or 0 (after mcp response)
5068                  */
5069                 if ((drv_pulse != mcp_pulse) &&
5070                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5071                         /* someone lost a heartbeat... */
5072                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5073                                   drv_pulse, mcp_pulse);
5074                 }
5075         }
5076
5077         if (bp->state == BNX2X_STATE_OPEN)
5078                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5079
5080 timer_restart:
5081         mod_timer(&bp->timer, jiffies + bp->current_interval);
5082 }
5083
5084 /* end of Statistics */
5085
5086 /* nic init */
5087
5088 /*
5089  * nic init service functions
5090  */
5091
5092 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5093 {
5094         int port = BP_PORT(bp);
5095
5096         /* "CSTORM" */
5097         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5098                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5099                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5100         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5101                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5102                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5103 }
5104
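/*
 * Point a non-default status block at its DMA area: program the host
 * addresses of the USTORM and CSTORM sections, tag both with the
 * owning function, and start with host coalescing disabled on every
 * index (bnx2x_update_coalesce() later enables the RX/TX CQ indices).
 */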
5105 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5106                           dma_addr_t mapping, int sb_id)
5107 {
5108         int port = BP_PORT(bp);
5109         int func = BP_FUNC(bp);
5110         int index;
5111         u64 section;
5112
5113         /* USTORM */
5114         section = ((u64)mapping) + offsetof(struct host_status_block,
5115                                             u_status_block);
5116         sb->u_status_block.status_block_id = sb_id;
5117
5118         REG_WR(bp, BAR_CSTRORM_INTMEM +
5119                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5120         REG_WR(bp, BAR_CSTRORM_INTMEM +
5121                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5122                U64_HI(section));
5123         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5124                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5125
5126         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5127                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5128                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5129
5130         /* CSTORM */
5131         section = ((u64)mapping) + offsetof(struct host_status_block,
5132                                             c_status_block);
5133         sb->c_status_block.status_block_id = sb_id;
5134
5135         REG_WR(bp, BAR_CSTRORM_INTMEM +
5136                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5137         REG_WR(bp, BAR_CSTRORM_INTMEM +
5138                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5139                U64_HI(section));
5140         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5141                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5142
5143         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5144                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5145                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5146
5147         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5148 }
5149
5150 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5151 {
5152         int func = BP_FUNC(bp);
5153
5154         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5155                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5156                         sizeof(struct tstorm_def_status_block)/4);
5157         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5158                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5159                         sizeof(struct cstorm_def_status_block_u)/4);
5160         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5161                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5162                         sizeof(struct cstorm_def_status_block_c)/4);
5163         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5164                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5165                         sizeof(struct xstorm_def_status_block)/4);
5166 }
5167
5168 static void bnx2x_init_def_sb(struct bnx2x *bp,
5169                               struct host_def_status_block *def_sb,
5170                               dma_addr_t mapping, int sb_id)
5171 {
5172         int port = BP_PORT(bp);
5173         int func = BP_FUNC(bp);
5174         int index, val, reg_offset;
5175         u64 section;
5176
5177         /* ATTN */
5178         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5179                                             atten_status_block);
5180         def_sb->atten_status_block.status_block_id = sb_id;
5181
5182         bp->attn_state = 0;
5183
5184         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5185                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5186
5187         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5188                 bp->attn_group[index].sig[0] = REG_RD(bp,
5189                                                      reg_offset + 0x10*index);
5190                 bp->attn_group[index].sig[1] = REG_RD(bp,
5191                                                reg_offset + 0x4 + 0x10*index);
5192                 bp->attn_group[index].sig[2] = REG_RD(bp,
5193                                                reg_offset + 0x8 + 0x10*index);
5194                 bp->attn_group[index].sig[3] = REG_RD(bp,
5195                                                reg_offset + 0xc + 0x10*index);
5196         }
5197
5198         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5199                              HC_REG_ATTN_MSG0_ADDR_L);
5200
5201         REG_WR(bp, reg_offset, U64_LO(section));
5202         REG_WR(bp, reg_offset + 4, U64_HI(section));
5203
5204         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5205
5206         val = REG_RD(bp, reg_offset);
5207         val |= sb_id;
5208         REG_WR(bp, reg_offset, val);
5209
5210         /* USTORM */
5211         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5212                                             u_def_status_block);
5213         def_sb->u_def_status_block.status_block_id = sb_id;
5214
5215         REG_WR(bp, BAR_CSTRORM_INTMEM +
5216                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5217         REG_WR(bp, BAR_CSTRORM_INTMEM +
5218                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5219                U64_HI(section));
5220         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5221                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5222
5223         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5224                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5225                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5226
5227         /* CSTORM */
5228         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5229                                             c_def_status_block);
5230         def_sb->c_def_status_block.status_block_id = sb_id;
5231
5232         REG_WR(bp, BAR_CSTRORM_INTMEM +
5233                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5234         REG_WR(bp, BAR_CSTRORM_INTMEM +
5235                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5236                U64_HI(section));
5237         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5238                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5239
5240         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5241                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5242                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5243
5244         /* TSTORM */
5245         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5246                                             t_def_status_block);
5247         def_sb->t_def_status_block.status_block_id = sb_id;
5248
5249         REG_WR(bp, BAR_TSTRORM_INTMEM +
5250                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5251         REG_WR(bp, BAR_TSTRORM_INTMEM +
5252                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5253                U64_HI(section));
5254         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5255                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5256
5257         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5258                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5259                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5260
5261         /* XSTORM */
5262         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5263                                             x_def_status_block);
5264         def_sb->x_def_status_block.status_block_id = sb_id;
5265
5266         REG_WR(bp, BAR_XSTRORM_INTMEM +
5267                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5268         REG_WR(bp, BAR_XSTRORM_INTMEM +
5269                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5270                U64_HI(section));
5271         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5272                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5273
5274         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5275                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5276                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5277
5278         bp->stats_pending = 0;
5279         bp->set_mac_pending = 0;
5280
5281         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5282 }
5283
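/*
 * Program the host coalescing timeouts from the user-configured tick
 * values.  The value written to the chip is ticks / (4 * BNX2X_BTR);
 * when that quotient is zero the paired HC_DISABLE word is set to mark
 * coalescing as off for that index.
 */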
5284 static void bnx2x_update_coalesce(struct bnx2x *bp)
5285 {
5286         int port = BP_PORT(bp);
5287         int i;
5288
5289         for_each_queue(bp, i) {
5290                 int sb_id = bp->fp[i].sb_id;
5291
5292                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5293                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5294                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5295                                                       U_SB_ETH_RX_CQ_INDEX),
5296                         bp->rx_ticks/(4 * BNX2X_BTR));
5297                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5298                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5299                                                        U_SB_ETH_RX_CQ_INDEX),
5300                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5301
5302                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5303                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5304                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5305                                                       C_SB_ETH_TX_CQ_INDEX),
5306                         bp->tx_ticks/(4 * BNX2X_BTR));
5307                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5308                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5309                                                        C_SB_ETH_TX_CQ_INDEX),
5310                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5311         }
5312 }
5313
5314 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5315                                        struct bnx2x_fastpath *fp, int last)
5316 {
5317         int i;
5318
5319         for (i = 0; i < last; i++) {
5320                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5321                 struct sk_buff *skb = rx_buf->skb;
5322
5323                 if (skb == NULL) {
5324                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5325                         continue;
5326                 }
5327
5328                 if (fp->tpa_state[i] == BNX2X_TPA_START)
5329                         dma_unmap_single(&bp->pdev->dev,
5330                                          dma_unmap_addr(rx_buf, mapping),
5331                                          bp->rx_buf_size, DMA_FROM_DEVICE);
5332
5333                 dev_kfree_skb(skb);
5334                 rx_buf->skb = NULL;
5335         }
5336 }
5337
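/*
 * Build the RX rings for every queue: size the receive buffers from
 * the MTU, pre-allocate the TPA bins when TPA is enabled (falling back
 * to non-TPA on allocation failure), and chain the SGE, BD and CQE
 * pages into rings by pointing the trailing element(s) of each page at
 * the next page.
 */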
5338 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5339 {
5340         int func = BP_FUNC(bp);
5341         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5342                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
5343         u16 ring_prod, cqe_ring_prod;
5344         int i, j;
5345
5346         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5347         DP(NETIF_MSG_IFUP,
5348            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5349
5350         if (bp->flags & TPA_ENABLE_FLAG) {
5351
5352                 for_each_queue(bp, j) {
5353                         struct bnx2x_fastpath *fp = &bp->fp[j];
5354
5355                         for (i = 0; i < max_agg_queues; i++) {
5356                                 fp->tpa_pool[i].skb =
5357                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5358                                 if (!fp->tpa_pool[i].skb) {
5359                                         BNX2X_ERR("Failed to allocate TPA "
5360                                                   "skb pool for queue[%d] - "
5361                                                   "disabling TPA on this "
5362                                                   "queue!\n", j);
5363                                         bnx2x_free_tpa_pool(bp, fp, i);
5364                                         fp->disable_tpa = 1;
5365                                         break;
5366                                 }
5367                                 dma_unmap_addr_set(&fp->tpa_pool[i],
5368                                                    mapping,
5369                                                    0);
5370                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
5371                         }
5372                 }
5373         }
5374
5375         for_each_queue(bp, j) {
5376                 struct bnx2x_fastpath *fp = &bp->fp[j];
5377
5378                 fp->rx_bd_cons = 0;
5379                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5380                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5381
5382                 /* "next page" elements initialization */
5383                 /* SGE ring */
5384                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5385                         struct eth_rx_sge *sge;
5386
5387                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5388                         sge->addr_hi =
5389                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5390                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5391                         sge->addr_lo =
5392                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5393                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5394                 }
5395
5396                 bnx2x_init_sge_ring_bit_mask(fp);
5397
5398                 /* RX BD ring */
5399                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5400                         struct eth_rx_bd *rx_bd;
5401
5402                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5403                         rx_bd->addr_hi =
5404                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5405                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5406                         rx_bd->addr_lo =
5407                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5408                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5409                 }
5410
5411                 /* CQ ring */
5412                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5413                         struct eth_rx_cqe_next_page *nextpg;
5414
5415                         nextpg = (struct eth_rx_cqe_next_page *)
5416                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5417                         nextpg->addr_hi =
5418                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5419                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5420                         nextpg->addr_lo =
5421                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5422                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5423                 }
5424
5425                 /* Allocate SGEs and initialize the ring elements */
5426                 for (i = 0, ring_prod = 0;
5427                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5428
5429                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5430                                 BNX2X_ERR("was only able to allocate "
5431                                           "%d rx sges\n", i);
5432                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5433                                 /* Cleanup already allocated elements */
5434                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5435                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5436                                 fp->disable_tpa = 1;
5437                                 ring_prod = 0;
5438                                 break;
5439                         }
5440                         ring_prod = NEXT_SGE_IDX(ring_prod);
5441                 }
5442                 fp->rx_sge_prod = ring_prod;
5443
5444                 /* Allocate BDs and initialize BD ring */
5445                 fp->rx_comp_cons = 0;
5446                 cqe_ring_prod = ring_prod = 0;
5447                 for (i = 0; i < bp->rx_ring_size; i++) {
5448                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5449                                 BNX2X_ERR("was only able to allocate "
5450                                           "%d rx skbs on queue[%d]\n", i, j);
5451                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5452                                 break;
5453                         }
5454                         ring_prod = NEXT_RX_IDX(ring_prod);
5455                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5456                         WARN_ON(ring_prod <= i);
5457                 }
5458
5459                 fp->rx_bd_prod = ring_prod;
5460                 /* must not have more available CQEs than BDs */
5461                 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5462                                          cqe_ring_prod);
5463                 fp->rx_pkt = fp->rx_calls = 0;
5464
5465                 /* Warning!
5466                  * this will generate an interrupt (to the TSTORM)
5467                  * and must only be done after the chip is initialized
5468                  */
5469                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5470                                      fp->rx_sge_prod);
5471                 if (j != 0)
5472                         continue;
5473
5474                 REG_WR(bp, BAR_USTRORM_INTMEM +
5475                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5476                        U64_LO(fp->rx_comp_mapping));
5477                 REG_WR(bp, BAR_USTRORM_INTMEM +
5478                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5479                        U64_HI(fp->rx_comp_mapping));
5480         }
5481 }
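
/*
 * Note on the "next page" loops in bnx2x_init_rx_rings() above (an
 * illustration derived from the indexing, not from HW documentation):
 * the last element(s) of every ring page are reserved as a pointer to
 * the next page - hence "RX_DESC_CNT * i - 2" for the two-entry-wide
 * RX BD pointer and "RCQ_DESC_CNT * i - 1" for the CQ - and the
 * "i % NUM_*_PAGES" term makes the final page point back at page 0,
 * closing the ring:
 *
 *	page 1 -> base + 1 * BCM_PAGE_SIZE
 *	page 2 -> base + 2 * BCM_PAGE_SIZE
 *	...
 *	page N -> base + 0		(wrap around)
 */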
5482
5483 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5484 {
5485         int i, j;
5486
5487         for_each_queue(bp, j) {
5488                 struct bnx2x_fastpath *fp = &bp->fp[j];
5489
5490                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5491                         struct eth_tx_next_bd *tx_next_bd =
5492                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5493
5494                         tx_next_bd->addr_hi =
5495                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5496                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5497                         tx_next_bd->addr_lo =
5498                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5499                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5500                 }
5501
5502                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5503                 fp->tx_db.data.zero_fill1 = 0;
5504                 fp->tx_db.data.prod = 0;
5505
5506                 fp->tx_pkt_prod = 0;
5507                 fp->tx_pkt_cons = 0;
5508                 fp->tx_bd_prod = 0;
5509                 fp->tx_bd_cons = 0;
5510                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5511                 fp->tx_pkt = 0;
5512         }
5513 }
5514
5515 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5516 {
5517         int func = BP_FUNC(bp);
5518
5519         spin_lock_init(&bp->spq_lock);
5520
5521         bp->spq_left = MAX_SPQ_PENDING;
5522         bp->spq_prod_idx = 0;
5523         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5524         bp->spq_prod_bd = bp->spq;
5525         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5526
5527         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5528                U64_LO(bp->spq_mapping));
5529         REG_WR(bp,
5530                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5531                U64_HI(bp->spq_mapping));
5532
5533         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5534                bp->spq_prod_idx);
5535 }
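
/*
 * The SPQ base programming above shows the register convention used
 * throughout this file: a 64-bit DMA address is written as two 32-bit
 * halves, low word at the given offset and high word at offset + 4.
 * A sketch of the recurring pattern (illustrative only):
 *
 *	REG_WR(bp, OFF,     U64_LO(mapping));
 *	REG_WR(bp, OFF + 4, U64_HI(mapping));
 */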
5536
5537 static void bnx2x_init_context(struct bnx2x *bp)
5538 {
5539         int i;
5540
5541         /* Rx */
5542         for_each_queue(bp, i) {
5543                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5544                 struct bnx2x_fastpath *fp = &bp->fp[i];
5545                 u8 cl_id = fp->cl_id;
5546
5547                 context->ustorm_st_context.common.sb_index_numbers =
5548                                                 BNX2X_RX_SB_INDEX_NUM;
5549                 context->ustorm_st_context.common.clientId = cl_id;
5550                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5551                 context->ustorm_st_context.common.flags =
5552                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5553                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5554                 context->ustorm_st_context.common.statistics_counter_id =
5555                                                 cl_id;
5556                 context->ustorm_st_context.common.mc_alignment_log_size =
5557                                                 BNX2X_RX_ALIGN_SHIFT;
5558                 context->ustorm_st_context.common.bd_buff_size =
5559                                                 bp->rx_buf_size;
5560                 context->ustorm_st_context.common.bd_page_base_hi =
5561                                                 U64_HI(fp->rx_desc_mapping);
5562                 context->ustorm_st_context.common.bd_page_base_lo =
5563                                                 U64_LO(fp->rx_desc_mapping);
5564                 if (!fp->disable_tpa) {
5565                         context->ustorm_st_context.common.flags |=
5566                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5567                         context->ustorm_st_context.common.sge_buff_size =
5568                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5569                                            0xffff);
5570                         context->ustorm_st_context.common.sge_page_base_hi =
5571                                                 U64_HI(fp->rx_sge_mapping);
5572                         context->ustorm_st_context.common.sge_page_base_lo =
5573                                                 U64_LO(fp->rx_sge_mapping);
5574
5575                         context->ustorm_st_context.common.max_sges_for_packet =
5576                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5577                         context->ustorm_st_context.common.max_sges_for_packet =
5578                                 ((context->ustorm_st_context.common.
5579                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5580                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5581                 }
5582
5583                 context->ustorm_ag_context.cdu_usage =
5584                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5585                                                CDU_REGION_NUMBER_UCM_AG,
5586                                                ETH_CONNECTION_TYPE);
5587
5588                 context->xstorm_ag_context.cdu_reserved =
5589                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5590                                                CDU_REGION_NUMBER_XCM_AG,
5591                                                ETH_CONNECTION_TYPE);
5592         }
5593
5594         /* Tx */
5595         for_each_queue(bp, i) {
5596                 struct bnx2x_fastpath *fp = &bp->fp[i];
5597                 struct eth_context *context =
5598                         bnx2x_sp(bp, context[i].eth);
5599
5600                 context->cstorm_st_context.sb_index_number =
5601                                                 C_SB_ETH_TX_CQ_INDEX;
5602                 context->cstorm_st_context.status_block_id = fp->sb_id;
5603
5604                 context->xstorm_st_context.tx_bd_page_base_hi =
5605                                                 U64_HI(fp->tx_desc_mapping);
5606                 context->xstorm_st_context.tx_bd_page_base_lo =
5607                                                 U64_LO(fp->tx_desc_mapping);
5608                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5609                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5610         }
5611 }
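
/*
 * Worked example for the two-step max_sges_for_packet computation in
 * bnx2x_init_context() above, under the assumed values PAGES_PER_SGE = 2
 * (so PAGES_PER_SGE_SHIFT = 1), 4K SGE pages and a 9000-byte MTU:
 * SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT = 3 pages, then rounding up to
 * a whole SGE group gives ((3 + 2 - 1) & ~1) >> 1 = 2 SGE entries.
 */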
5612
5613 static void bnx2x_init_ind_table(struct bnx2x *bp)
5614 {
5615         int func = BP_FUNC(bp);
5616         int i;
5617
5618         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5619                 return;
5620
5621         DP(NETIF_MSG_IFUP,
5622            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5623         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5624                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5625                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5626                         bp->fp->cl_id + (i % bp->num_queues));
5627 }
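
/*
 * Example of the round-robin fill in bnx2x_init_ind_table() above
 * (illustrative values only; the real base cl_id comes from the leading
 * fastpath): with num_queues = 4 and cl_id = 0, the table entries read
 * 0, 1, 2, 3, 0, 1, 2, 3, ... so RSS hash buckets are spread evenly
 * across the queues.
 */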
5628
5629 static void bnx2x_set_client_config(struct bnx2x *bp)
5630 {
5631         struct tstorm_eth_client_config tstorm_client = {0};
5632         int port = BP_PORT(bp);
5633         int i;
5634
5635         tstorm_client.mtu = bp->dev->mtu;
5636         tstorm_client.config_flags =
5637                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5638                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5639 #ifdef BCM_VLAN
5640         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5641                 tstorm_client.config_flags |=
5642                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5643                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5644         }
5645 #endif
5646
5647         for_each_queue(bp, i) {
5648                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5649
5650                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5651                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5652                        ((u32 *)&tstorm_client)[0]);
5653                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5654                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5655                        ((u32 *)&tstorm_client)[1]);
5656         }
5657
5658         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5659            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5660 }
5661
5662 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5663 {
5664         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5665         int mode = bp->rx_mode;
5666         int mask = bp->rx_mode_cl_mask;
5667         int func = BP_FUNC(bp);
5668         int port = BP_PORT(bp);
5669         int i;
5670         /* All but management unicast packets should pass to the host as well */
5671         u32 llh_mask =
5672                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5673                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5674                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5675                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5676
5677         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5678
5679         switch (mode) {
5680         case BNX2X_RX_MODE_NONE: /* no Rx */
5681                 tstorm_mac_filter.ucast_drop_all = mask;
5682                 tstorm_mac_filter.mcast_drop_all = mask;
5683                 tstorm_mac_filter.bcast_drop_all = mask;
5684                 break;
5685
5686         case BNX2X_RX_MODE_NORMAL:
5687                 tstorm_mac_filter.bcast_accept_all = mask;
5688                 break;
5689
5690         case BNX2X_RX_MODE_ALLMULTI:
5691                 tstorm_mac_filter.mcast_accept_all = mask;
5692                 tstorm_mac_filter.bcast_accept_all = mask;
5693                 break;
5694
5695         case BNX2X_RX_MODE_PROMISC:
5696                 tstorm_mac_filter.ucast_accept_all = mask;
5697                 tstorm_mac_filter.mcast_accept_all = mask;
5698                 tstorm_mac_filter.bcast_accept_all = mask;
5699                 /* pass management unicast packets as well */
5700                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5701                 break;
5702
5703         default:
5704                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5705                 break;
5706         }
5707
5708         REG_WR(bp,
5709                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5710                llh_mask);
5711
5712         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5713                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5714                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5715                        ((u32 *)&tstorm_mac_filter)[i]);
5716
5717 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5718                    ((u32 *)&tstorm_mac_filter)[i]); */
5719         }
5720
5721         if (mode != BNX2X_RX_MODE_NONE)
5722                 bnx2x_set_client_config(bp);
5723 }
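
/*
 * Summary of the filter matrix programmed by bnx2x_set_storm_rx_mode()
 * above, derived from the switch statement only: NONE drops all unicast,
 * multicast and broadcast; NORMAL force-accepts only broadcast and
 * leaves the rest to the MAC filters; ALLMULTI additionally accepts all
 * multicast; PROMISC accepts everything and also opens the NIG LLH mask
 * so that management unicast frames reach the host.
 */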
5724
5725 static void bnx2x_init_internal_common(struct bnx2x *bp)
5726 {
5727         int i;
5728
5729         /* Zero this manually as its initialization is
5730            currently missing in the initTool */
5731         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5732                 REG_WR(bp, BAR_USTRORM_INTMEM +
5733                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5734 }
5735
5736 static void bnx2x_init_internal_port(struct bnx2x *bp)
5737 {
5738         int port = BP_PORT(bp);
5739
5740         REG_WR(bp,
5741                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5742         REG_WR(bp,
5743                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5744         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5745         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5746 }
5747
5748 static void bnx2x_init_internal_func(struct bnx2x *bp)
5749 {
5750         struct tstorm_eth_function_common_config tstorm_config = {0};
5751         struct stats_indication_flags stats_flags = {0};
5752         int port = BP_PORT(bp);
5753         int func = BP_FUNC(bp);
5754         int i, j;
5755         u32 offset;
5756         u16 max_agg_size;
5757
5758         tstorm_config.config_flags = RSS_FLAGS(bp);
5759
5760         if (is_multi(bp))
5761                 tstorm_config.rss_result_mask = MULTI_MASK;
5762
5763         /* Enable TPA if needed */
5764         if (bp->flags & TPA_ENABLE_FLAG)
5765                 tstorm_config.config_flags |=
5766                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5767
5768         if (IS_E1HMF(bp))
5769                 tstorm_config.config_flags |=
5770                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5771
5772         tstorm_config.leading_client_id = BP_L_ID(bp);
5773
5774         REG_WR(bp, BAR_TSTRORM_INTMEM +
5775                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5776                (*(u32 *)&tstorm_config));
5777
5778         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5779         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5780         bnx2x_set_storm_rx_mode(bp);
5781
5782         for_each_queue(bp, i) {
5783                 u8 cl_id = bp->fp[i].cl_id;
5784
5785                 /* reset xstorm per client statistics */
5786                 offset = BAR_XSTRORM_INTMEM +
5787                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5788                 for (j = 0;
5789                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5790                         REG_WR(bp, offset + j*4, 0);
5791
5792                 /* reset tstorm per client statistics */
5793                 offset = BAR_TSTRORM_INTMEM +
5794                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5795                 for (j = 0;
5796                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5797                         REG_WR(bp, offset + j*4, 0);
5798
5799                 /* reset ustorm per client statistics */
5800                 offset = BAR_USTRORM_INTMEM +
5801                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5802                 for (j = 0;
5803                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5804                         REG_WR(bp, offset + j*4, 0);
5805         }
5806
5807         /* Init statistics related context */
5808         stats_flags.collect_eth = 1;
5809
5810         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5811                ((u32 *)&stats_flags)[0]);
5812         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5813                ((u32 *)&stats_flags)[1]);
5814
5815         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5816                ((u32 *)&stats_flags)[0]);
5817         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5818                ((u32 *)&stats_flags)[1]);
5819
5820         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5821                ((u32 *)&stats_flags)[0]);
5822         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5823                ((u32 *)&stats_flags)[1]);
5824
5825         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5826                ((u32 *)&stats_flags)[0]);
5827         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5828                ((u32 *)&stats_flags)[1]);
5829
5830         REG_WR(bp, BAR_XSTRORM_INTMEM +
5831                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833         REG_WR(bp, BAR_XSTRORM_INTMEM +
5834                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5836
5837         REG_WR(bp, BAR_TSTRORM_INTMEM +
5838                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840         REG_WR(bp, BAR_TSTRORM_INTMEM +
5841                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5843
5844         REG_WR(bp, BAR_USTRORM_INTMEM +
5845                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5846                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5847         REG_WR(bp, BAR_USTRORM_INTMEM +
5848                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5849                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5850
5851         if (CHIP_IS_E1H(bp)) {
5852                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5853                         IS_E1HMF(bp));
5854                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5855                         IS_E1HMF(bp));
5856                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5857                         IS_E1HMF(bp));
5858                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5859                         IS_E1HMF(bp));
5860
5861                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5862                          bp->e1hov);
5863         }
5864
5865         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5866         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5867                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5868         for_each_queue(bp, i) {
5869                 struct bnx2x_fastpath *fp = &bp->fp[i];
5870
5871                 REG_WR(bp, BAR_USTRORM_INTMEM +
5872                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5873                        U64_LO(fp->rx_comp_mapping));
5874                 REG_WR(bp, BAR_USTRORM_INTMEM +
5875                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5876                        U64_HI(fp->rx_comp_mapping));
5877
5878                 /* Next page */
5879                 REG_WR(bp, BAR_USTRORM_INTMEM +
5880                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5881                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5882                 REG_WR(bp, BAR_USTRORM_INTMEM +
5883                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5884                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5885
5886                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5887                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5888                          max_agg_size);
5889         }
5890
5891         /* dropless flow control */
5892         if (CHIP_IS_E1H(bp)) {
5893                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5894
5895                 rx_pause.bd_thr_low = 250;
5896                 rx_pause.cqe_thr_low = 250;
5897                 rx_pause.cos = 1;
5898                 rx_pause.sge_thr_low = 0;
5899                 rx_pause.bd_thr_high = 350;
5900                 rx_pause.cqe_thr_high = 350;
5901                 rx_pause.sge_thr_high = 0;
5902
5903                 for_each_queue(bp, i) {
5904                         struct bnx2x_fastpath *fp = &bp->fp[i];
5905
5906                         if (!fp->disable_tpa) {
5907                                 rx_pause.sge_thr_low = 150;
5908                                 rx_pause.sge_thr_high = 250;
5909                         }
5910
5911
5912                         offset = BAR_USTRORM_INTMEM +
5913                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5914                                                                    fp->cl_id);
5915                         for (j = 0;
5916                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5917                              j++)
5918                                 REG_WR(bp, offset + j*4,
5919                                        ((u32 *)&rx_pause)[j]);
5920                 }
5921         }
5922
5923         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5924
5925         /* Init rate shaping and fairness contexts */
5926         if (IS_E1HMF(bp)) {
5927                 int vn;
5928
5929                 /* During init there is no active link;
5930                    until link is up, set the link rate to 10Gbps */
5931                 bp->link_vars.line_speed = SPEED_10000;
5932                 bnx2x_init_port_minmax(bp);
5933
5934                 if (!BP_NOMCP(bp))
5935                         bp->mf_config =
5936                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5937                 bnx2x_calc_vn_weight_sum(bp);
5938
5939                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5940                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5941
5942                 /* Enable rate shaping and fairness */
5943                 bp->cmng.flags.cmng_enables |=
5944                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5945
5946         } else {
5947                 /* rate shaping and fairness are disabled */
5948                 DP(NETIF_MSG_IFUP,
5949                    "single function mode  minmax will be disabled\n");
5950         }
5951
5952
5953         /* Store cmng structures to internal memory */
5954         if (bp->port.pmf)
5955                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5956                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5957                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5958                                ((u32 *)(&bp->cmng))[i]);
5959 }
5960
5961 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5962 {
5963         switch (load_code) {
5964         case FW_MSG_CODE_DRV_LOAD_COMMON:
5965                 bnx2x_init_internal_common(bp);
5966                 /* no break */
5967
5968         case FW_MSG_CODE_DRV_LOAD_PORT:
5969                 bnx2x_init_internal_port(bp);
5970                 /* no break */
5971
5972         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5973                 bnx2x_init_internal_func(bp);
5974                 break;
5975
5976         default:
5977                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5978                 break;
5979         }
5980 }
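
/*
 * The omitted breaks in bnx2x_init_internal() are deliberate: a COMMON
 * load code runs the common, port and function stages, a PORT load runs
 * port + function, and a FUNCTION load only its own stage, matching the
 * scope of initialization the MCP assigned to this driver instance.
 */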
5981
5982 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5983 {
5984         int i;
5985
5986         for_each_queue(bp, i) {
5987                 struct bnx2x_fastpath *fp = &bp->fp[i];
5988
5989                 fp->bp = bp;
5990                 fp->state = BNX2X_FP_STATE_CLOSED;
5991                 fp->index = i;
5992                 fp->cl_id = BP_L_ID(bp) + i;
5993 #ifdef BCM_CNIC
5994                 fp->sb_id = fp->cl_id + 1;
5995 #else
5996                 fp->sb_id = fp->cl_id;
5997 #endif
5998                 DP(NETIF_MSG_IFUP,
5999                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
6000                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
6001                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
6002                               fp->sb_id);
6003                 bnx2x_update_fpsb_idx(fp);
6004         }
6005
6006         /* ensure status block indices were read */
6007         rmb();
6008
6009
6010         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6011                           DEF_SB_ID);
6012         bnx2x_update_dsb_idx(bp);
6013         bnx2x_update_coalesce(bp);
6014         bnx2x_init_rx_rings(bp);
6015         bnx2x_init_tx_ring(bp);
6016         bnx2x_init_sp_ring(bp);
6017         bnx2x_init_context(bp);
6018         bnx2x_init_internal(bp, load_code);
6019         bnx2x_init_ind_table(bp);
6020         bnx2x_stats_init(bp);
6021
6022         /* At this point, we are ready for interrupts */
6023         atomic_set(&bp->intr_sem, 0);
6024
6025         /* flush all before enabling interrupts */
6026         mb();
6027         mmiowb();
6028
6029         bnx2x_int_enable(bp);
6030
6031         /* Check for SPIO5 */
6032         bnx2x_attn_int_deasserted0(bp,
6033                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6034                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6035 }
6036
6037 /* end of nic init */
6038
6039 /*
6040  * gzip service functions
6041  */
6042
6043 static int bnx2x_gunzip_init(struct bnx2x *bp)
6044 {
6045         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6046                                             &bp->gunzip_mapping, GFP_KERNEL);
6047         if (bp->gunzip_buf == NULL)
6048                 goto gunzip_nomem1;
6049
6050         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6051         if (bp->strm == NULL)
6052                 goto gunzip_nomem2;
6053
6054         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6055                                       GFP_KERNEL);
6056         if (bp->strm->workspace == NULL)
6057                 goto gunzip_nomem3;
6058
6059         return 0;
6060
6061 gunzip_nomem3:
6062         kfree(bp->strm);
6063         bp->strm = NULL;
6064
6065 gunzip_nomem2:
6066         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6067                           bp->gunzip_mapping);
6068         bp->gunzip_buf = NULL;
6069
6070 gunzip_nomem1:
6071         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6072                " decompression\n");
6073         return -ENOMEM;
6074 }
6075
6076 static void bnx2x_gunzip_end(struct bnx2x *bp)
6077 {
6078         kfree(bp->strm->workspace);
6079
6080         kfree(bp->strm);
6081         bp->strm = NULL;
6082
6083         if (bp->gunzip_buf) {
6084                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6085                                   bp->gunzip_mapping);
6086                 bp->gunzip_buf = NULL;
6087         }
6088 }
6089
6090 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6091 {
6092         int n, rc;
6093
6094         /* check gzip header */
6095         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6096                 BNX2X_ERR("Bad gzip header\n");
6097                 return -EINVAL;
6098         }
6099
6100         n = 10;
6101
6102 #define FNAME                           0x8
6103
6104         if (zbuf[3] & FNAME)
6105                 while ((zbuf[n++] != 0) && (n < len));
6106
6107         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6108         bp->strm->avail_in = len - n;
6109         bp->strm->next_out = bp->gunzip_buf;
6110         bp->strm->avail_out = FW_BUF_SIZE;
6111
6112         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6113         if (rc != Z_OK)
6114                 return rc;
6115
6116         rc = zlib_inflate(bp->strm, Z_FINISH);
6117         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6118                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6119                            bp->strm->msg);
6120
6121         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6122         if (bp->gunzip_outlen & 0x3)
6123                 netdev_err(bp->dev, "Firmware decompression error:"
6124                                     " gunzip_outlen (%d) not aligned\n",
6125                                 bp->gunzip_outlen);
6126         bp->gunzip_outlen >>= 2;
6127
6128         zlib_inflateEnd(bp->strm);
6129
6130         if (rc == Z_STREAM_END)
6131                 return 0;
6132
6133         return rc;
6134 }
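
/*
 * Header layout assumed by bnx2x_gunzip() above (per RFC 1952): a fixed
 * 10-byte prefix - ID1 0x1f, ID2 0x8b, CM 8 (Z_DEFLATED), FLG, 4-byte
 * MTIME, XFL, OS - optionally followed by a NUL-terminated original
 * file name when FLG bit 0x8 (FNAME) is set, which the while loop
 * skips. Inflation then runs in raw mode (-MAX_WBITS) because the gzip
 * wrapper has already been consumed by hand.
 */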
6135
6136 /* nic load/unload */
6137
6138 /*
6139  * General service functions
6140  */
6141
6142 /* send a NIG loopback debug packet */
6143 static void bnx2x_lb_pckt(struct bnx2x *bp)
6144 {
6145         u32 wb_write[3];
6146
6147         /* Ethernet source and destination addresses */
6148         wb_write[0] = 0x55555555;
6149         wb_write[1] = 0x55555555;
6150         wb_write[2] = 0x20;             /* SOP */
6151         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6152
6153         /* NON-IP protocol */
6154         wb_write[0] = 0x09000000;
6155         wb_write[1] = 0x55555555;
6156         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6157         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6158 }
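
/*
 * The two 3-dword writes in bnx2x_lb_pckt() emit one minimal debug
 * frame: the first carries the all-5s MAC addresses plus the SOP marker
 * (0x20), the second a bogus non-IP protocol word plus the EOP marker
 * (0x10). The 0x10-byte footprint of such a frame is what the BRB/PRS
 * counters in bnx2x_int_mem_test() below are checked against.
 */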
6159
6160 /* Some of the internal memories
6161  * are not directly readable from the driver;
6162  * to test them we send debug packets.
6163  */
6164 static int bnx2x_int_mem_test(struct bnx2x *bp)
6165 {
6166         int factor;
6167         int count, i;
6168         u32 val = 0;
6169
6170         if (CHIP_REV_IS_FPGA(bp))
6171                 factor = 120;
6172         else if (CHIP_REV_IS_EMUL(bp))
6173                 factor = 200;
6174         else
6175                 factor = 1;
6176
6177         DP(NETIF_MSG_HW, "start part1\n");
6178
6179         /* Disable inputs of parser neighbor blocks */
6180         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6181         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6182         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6183         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6184
6185         /*  Write 0 to parser credits for CFC search request */
6186         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6187
6188         /* send Ethernet packet */
6189         bnx2x_lb_pckt(bp);
6190
6191         /* TODO: do we need to reset the NIG statistics? */
6192         /* Wait until NIG register shows 1 packet of size 0x10 */
6193         count = 1000 * factor;
6194         while (count) {
6195
6196                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6197                 val = *bnx2x_sp(bp, wb_data[0]);
6198                 if (val == 0x10)
6199                         break;
6200
6201                 msleep(10);
6202                 count--;
6203         }
6204         if (val != 0x10) {
6205                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6206                 return -1;
6207         }
6208
6209         /* Wait until PRS register shows 1 packet */
6210         count = 1000 * factor;
6211         while (count) {
6212                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6213                 if (val == 1)
6214                         break;
6215
6216                 msleep(10);
6217                 count--;
6218         }
6219         if (val != 0x1) {
6220                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6221                 return -2;
6222         }
6223
6224         /* Reset and init BRB, PRS */
6225         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6226         msleep(50);
6227         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6228         msleep(50);
6229         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6230         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6231
6232         DP(NETIF_MSG_HW, "part2\n");
6233
6234         /* Disable inputs of parser neighbor blocks */
6235         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6236         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6237         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6238         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6239
6240         /* Write 0 to parser credits for CFC search request */
6241         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6242
6243         /* send 10 Ethernet packets */
6244         for (i = 0; i < 10; i++)
6245                 bnx2x_lb_pckt(bp);
6246
6247         /* Wait until NIG register shows 10 + 1
6248            packets of size 11*0x10 = 0xb0 */
6249         count = 1000 * factor;
6250         while (count) {
6251
6252                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6253                 val = *bnx2x_sp(bp, wb_data[0]);
6254                 if (val == 0xb0)
6255                         break;
6256
6257                 msleep(10);
6258                 count--;
6259         }
6260         if (val != 0xb0) {
6261                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6262                 return -3;
6263         }
6264
6265         /* Wait until PRS register shows 2 packets */
6266         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6267         if (val != 2)
6268                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6269
6270         /* Write 1 to parser credits for CFC search request */
6271         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6272
6273         /* Wait for the parser to consume the stalled packet;
6274            the PRS counter should then show 3 packets */
6275         msleep(10 * factor);
6276         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6277         if (val != 3)
6278                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6279
6280         /* clear NIG EOP FIFO */
6281         for (i = 0; i < 11; i++)
6282                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6283         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6284         if (val != 1) {
6285                 BNX2X_ERR("clear of NIG failed\n");
6286                 return -4;
6287         }
6288
6289         /* Reset and init BRB, PRS, NIG */
6290         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6291         msleep(50);
6292         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6293         msleep(50);
6294         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6295         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6296 #ifndef BCM_CNIC
6297         /* set NIC mode */
6298         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6299 #endif
6300
6301         /* Enable inputs of parser neighbor blocks */
6302         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6303         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6304         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6305         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6306
6307         DP(NETIF_MSG_HW, "done\n");
6308
6309         return 0; /* OK */
6310 }
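
/*
 * The byte counts polled in bnx2x_int_mem_test() follow directly from
 * the frame built by bnx2x_lb_pckt(): each debug packet occupies 0x10
 * bytes in the BRB octet statistic, so part 1 expects 1 * 0x10 = 0x10
 * and part 2 expects the 10 new packets plus the one already counted in
 * part 1 (the NIG statistic is not reset in between), i.e.
 * 11 * 0x10 = 0xb0.
 */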
6311
6312 static void enable_blocks_attention(struct bnx2x *bp)
6313 {
6314         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6315         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6316         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6317         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6318         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6319         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6320         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6321         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6322         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6323 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6324 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6325         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6326         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6327         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6328 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6329 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6330         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6331         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6332         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6333         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6334 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6335 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6336         if (CHIP_REV_IS_FPGA(bp))
6337                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6338         else
6339                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6340         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6341         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6342         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6343 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6344 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6345         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6346         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6347 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6348         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
6349 }
6350
6351 static const struct {
6352         u32 addr;
6353         u32 mask;
6354 } bnx2x_parity_mask[] = {
6355         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6356         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6357         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6358         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6359         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6360         {QM_REG_QM_PRTY_MASK, 0x0},
6361         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6362         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6363         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6364         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6365         {CDU_REG_CDU_PRTY_MASK, 0x0},
6366         {CFC_REG_CFC_PRTY_MASK, 0x0},
6367         {DBG_REG_DBG_PRTY_MASK, 0x0},
6368         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6369         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6370         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6371         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6372         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6373         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6374         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6375         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6376         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6377         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6378         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6379         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6380         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6381         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6382         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6383 };
6384
6385 static void enable_blocks_parity(struct bnx2x *bp)
6386 {
6387         int i;
6388         int mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6389
6390         for (i = 0; i < mask_arr_len; i++)
6391                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6392                         bnx2x_parity_mask[i].mask);
6393 }
6394
6395
6396 static void bnx2x_reset_common(struct bnx2x *bp)
6397 {
6398         /* reset_common */
6399         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6400                0xd3ffff7f);
6401         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6402 }
6403
6404 static void bnx2x_init_pxp(struct bnx2x *bp)
6405 {
6406         u16 devctl;
6407         int r_order, w_order;
6408
6409         pci_read_config_word(bp->pdev,
6410                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6411         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6412         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6413         if (bp->mrrs == -1)
6414                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6415         else {
6416                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6417                 r_order = bp->mrrs;
6418         }
6419
6420         bnx2x_init_pxp_arb(bp, r_order, w_order);
6421 }
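
/*
 * Illustrative decoding of the Device Control fields read in
 * bnx2x_init_pxp() above (a sketch; the driver itself only needs the
 * raw orders): both the payload field (bits 7:5) and the read request
 * field (bits 14:12) encode log2(size / 128), so e.g. devctl = 0x2830
 * yields w_order = 1 (256 bytes) and r_order = 2 (512 bytes), unless
 * bp->mrrs requests a forced read order.
 */
static inline int bnx2x_order_to_bytes_sketch(int order)
{
	return 128 << order;	/* 0 -> 128, 1 -> 256, 2 -> 512, ... */
}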
6422
6423 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6424 {
6425         int is_required;
6426         u32 val;
6427         int port;
6428
6429         if (BP_NOMCP(bp))
6430                 return;
6431
6432         is_required = 0;
6433         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6434               SHARED_HW_CFG_FAN_FAILURE_MASK;
6435
6436         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6437                 is_required = 1;
6438
6439         /*
6440          * The fan failure mechanism is usually related to the PHY type since
6441          * the power consumption of the board is affected by the PHY. Currently,
6442          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6443          */
6444         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6445                 for (port = PORT_0; port < PORT_MAX; port++) {
6446                         u32 phy_type =
6447                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6448                                          external_phy_config) &
6449                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6450                         is_required |=
6451                                 ((phy_type ==
6452                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6453                                  (phy_type ==
6454                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6455                                  (phy_type ==
6456                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6457                 }
6458
6459         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6460
6461         if (is_required == 0)
6462                 return;
6463
6464         /* Fan failure is indicated by SPIO 5 */
6465         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6466                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6467
6468         /* set to active low mode */
6469         val = REG_RD(bp, MISC_REG_SPIO_INT);
6470         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6471                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6472         REG_WR(bp, MISC_REG_SPIO_INT, val);
6473
6474         /* enable interrupt to signal the IGU */
6475         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6476         val |= (1 << MISC_REGISTERS_SPIO_5);
6477         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6478 }
6479
6480 static int bnx2x_init_common(struct bnx2x *bp)
6481 {
6482         u32 val, i;
6483 #ifdef BCM_CNIC
6484         u32 wb_write[2];
6485 #endif
6486
6487         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6488
6489         bnx2x_reset_common(bp);
6490         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6491         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6492
6493         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6494         if (CHIP_IS_E1H(bp))
6495                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6496
6497         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6498         msleep(30);
6499         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6500
6501         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6502         if (CHIP_IS_E1(bp)) {
6503                 /* enable HW interrupt from PXP on USDM overflow
6504                    bit 16 on INT_MASK_0 */
6505                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6506         }
6507
6508         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6509         bnx2x_init_pxp(bp);
6510
6511 #ifdef __BIG_ENDIAN
6512         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6513         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6514         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6515         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6516         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6517         /* make sure this value is 0 */
6518         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6519
6520 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6521         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6522         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6523         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6524         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6525 #endif
6526
6527         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6528 #ifdef BCM_CNIC
6529         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6530         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6531         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6532 #endif
6533
6534         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6535                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6536
6537         /* let the HW do its magic ... */
6538         msleep(100);
6539         /* finish PXP init */
6540         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6541         if (val != 1) {
6542                 BNX2X_ERR("PXP2 CFG failed\n");
6543                 return -EBUSY;
6544         }
6545         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6546         if (val != 1) {
6547                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6548                 return -EBUSY;
6549         }
6550
6551         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6552         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6553
6554         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6555
6556         /* clean the DMAE memory */
6557         bp->dmae_ready = 1;
6558         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6559
6560         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6561         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6562         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6563         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6564
6565         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6566         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6567         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6568         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6569
6570         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6571
6572 #ifdef BCM_CNIC
6573         wb_write[0] = 0;
6574         wb_write[1] = 0;
6575         for (i = 0; i < 64; i++) {
6576                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6577                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6578
6579                 if (CHIP_IS_E1H(bp)) {
6580                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6581                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6582                                           wb_write, 2);
6583                 }
6584         }
6585 #endif
6586         /* soft reset pulse */
6587         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6588         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6589
6590 #ifdef BCM_CNIC
6591         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6592 #endif
6593
6594         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6595         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6596         if (!CHIP_REV_IS_SLOW(bp)) {
6597                 /* enable hw interrupt from doorbell Q */
6598                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6599         }
6600
6601         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6602         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6603         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6604 #ifndef BCM_CNIC
6605         /* set NIC mode */
6606         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6607 #endif
6608         if (CHIP_IS_E1H(bp))
6609                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6610
6611         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6612         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6613         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6614         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6615
6616         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6617         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6618         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6619         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6620
6621         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6622         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6623         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6624         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6625
6626         /* sync semi rtc */
6627         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6628                0x80000000);
6629         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6630                0x80000000);
6631
6632         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6633         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6634         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6635
6636         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6637         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6638                 REG_WR(bp, i, random32());
6639         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6640 #ifdef BCM_CNIC
6641         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6642         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6643         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6644         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6645         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6646         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6647         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6648         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6649         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6650         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6651 #endif
6652         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6653
6654         if (sizeof(union cdu_context) != 1024)
6655                 /* we currently assume that a context is 1024 bytes */
6656                 dev_alert(&bp->pdev->dev, "please adjust the size "
6657                                           "of cdu_context (%ld)\n",
6658                          (long)sizeof(union cdu_context));
6659
6660         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6661         val = (4 << 24) + (0 << 12) + 1024;
6662         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6663
6664         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6665         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6666         /* enable context validation interrupt from CFC */
6667         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6668
6669         /* set the thresholds to prevent CFC/CDU race */
6670         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6671
6672         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6673         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6674
6675         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6676         /* Reset PCIE errors for debug */
6677         REG_WR(bp, 0x2814, 0xffffffff);
6678         REG_WR(bp, 0x3820, 0xffffffff);
6679
6680         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6681         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6682         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6683         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6684
6685         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6686         if (CHIP_IS_E1H(bp)) {
6687                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6688                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6689         }
6690
6691         if (CHIP_REV_IS_SLOW(bp))
6692                 msleep(200);
6693
6694         /* finish CFC init */
6695         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6696         if (val != 1) {
6697                 BNX2X_ERR("CFC LL_INIT failed\n");
6698                 return -EBUSY;
6699         }
6700         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6701         if (val != 1) {
6702                 BNX2X_ERR("CFC AC_INIT failed\n");
6703                 return -EBUSY;
6704         }
6705         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6706         if (val != 1) {
6707                 BNX2X_ERR("CFC CAM_INIT failed\n");
6708                 return -EBUSY;
6709         }
6710         REG_WR(bp, CFC_REG_DEBUG0, 0);
6711
6712         /* read NIG statistic
6713            to see if this is our first time up since power-up */
6714         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6715         val = *bnx2x_sp(bp, wb_data[0]);
6716
6717         /* do internal memory self test */
6718         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6719                 BNX2X_ERR("internal mem self test failed\n");
6720                 return -EBUSY;
6721         }
6722
6723         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6724         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6725         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6726         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6727         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6728                 bp->port.need_hw_lock = 1;
6729                 break;
6730
6731         default:
6732                 break;
6733         }
6734
6735         bnx2x_setup_fan_failure_detection(bp);
6736
6737         /* clear PXP2 attentions */
6738         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6739
6740         enable_blocks_attention(bp);
6741         if (CHIP_PARITY_SUPPORTED(bp))
6742                 enable_blocks_parity(bp);
6743
6744         if (!BP_NOMCP(bp)) {
6745                 bnx2x_acquire_phy_lock(bp);
6746                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6747                 bnx2x_release_phy_lock(bp);
6748         } else
6749                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6750
6751         return 0;
6752 }
6753
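/*
 * bnx2x_init_port - per-port HW init (PORT0/PORT1 stage)
 *
 * Runs the per-port init pass over the HW blocks (PXP through NIG),
 * programs the BRB pause thresholds from the MTU and port mode, sets
 * the PBF arbiter threshold and init credit, masks/unmasks the AEU
 * attention bits per function, and registers the external-PHY
 * GPIO/SPIO attentions where the fitted PHY needs them.
 */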
6754 static int bnx2x_init_port(struct bnx2x *bp)
6755 {
6756         int port = BP_PORT(bp);
6757         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6758         u32 low, high;
6759         u32 val;
6760
6761         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
6762
6763         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6764
6765         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6766         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6767
6768         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6769         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6770         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6771         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6772
6773 #ifdef BCM_CNIC
6774         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6775
6776         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6777         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6778         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6779 #endif
6780
6781         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6782
6783         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6784         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6785                 /* no pause for emulation and FPGA */
6786                 low = 0;
6787                 high = 513;
6788         } else {
6789                 if (IS_E1HMF(bp))
6790                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6791                 else if (bp->dev->mtu > 4096) {
6792                         if (bp->flags & ONE_PORT_FLAG)
6793                                 low = 160;
6794                         else {
6795                                 val = bp->dev->mtu;
6796                                 /* (24*1024 + val*4)/256 */
6797                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6798                         }
6799                 } else
6800                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6801                 high = low + 56;        /* 14*1024/256 */
6802         }
6803         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6804         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6805
6807         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6808
6809         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6810         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6811         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6812         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6813
6814         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6815         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6816         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6817         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6818
6819         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6820         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6821
6822         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6823
6824         /* configure PBF to work without PAUSE, mtu 9000 */
6825         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6826
6827         /* update threshold */
6828         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6829         /* update init credit */
6830         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6831
6832         /* probe changes */
6833         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6834         msleep(5);
6835         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6836
6837 #ifdef BCM_CNIC
6838         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6839 #endif
6840         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6841         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6842
6843         if (CHIP_IS_E1(bp)) {
6844                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6845                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6846         }
6847         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6848
6849         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6850         /* init aeu_mask_attn_func_0/1:
6851          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6852          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6853          *             bits 4-7 are used for "per vn group attention" */
6854         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6855                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6856
6857         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6858         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6859         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6860         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6861         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6862
6863         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6864
6865         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6866
6867         if (CHIP_IS_E1H(bp)) {
6868                 /* 0x2 disable e1hov, 0x1 enable */
6869                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6870                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6871
6872                 {
6873                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6874                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6875                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6876                 }
6877         }
6878
6879         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6880         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6881
6882         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6883         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6884                 {
6885                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6886
6887                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6888                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6889
6890                 /* The GPIO should be swapped if the swap register is
6891                    set and active */
6892                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6893                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6894
6895                 /* Select function upon port-swap configuration */
6896                 if (port == 0) {
6897                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6898                         aeu_gpio_mask = (swap_val && swap_override) ?
6899                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6900                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6901                 } else {
6902                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6903                         aeu_gpio_mask = (swap_val && swap_override) ?
6904                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6905                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6906                 }
6907                 val = REG_RD(bp, offset);
6908                 /* add GPIO3 to group */
6909                 val |= aeu_gpio_mask;
6910                 REG_WR(bp, offset, val);
6911                 }
6912                 break;
6913
6914         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6915         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6916                 /* add SPIO 5 to group 0 */
6917                 {
6918                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6919                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6920                 val = REG_RD(bp, reg_addr);
6921                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6922                 REG_WR(bp, reg_addr, val);
6923                 }
6924                 break;
6925
6926         default:
6927                 break;
6928         }
6929
6930         bnx2x__link_reset(bp);
6931
6932         return 0;
6933 }
6934
6935 #define ILT_PER_FUNC            (768/2)
6936 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6937 /* the phys address is shifted right 12 bits and has a
6938    1=valid bit added as the 53rd bit;
6939    since this is a wide register(TM)
6940    we split it into two 32-bit writes
6941  */
6942 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6943 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6944 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6945 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
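/* Worked example (illustrative address, not from the original code):
 * for addr = 0x123456789000, ONCHIP_ADDR1(addr) = 0x23456789
 * (bits 12-43 of the address) and ONCHIP_ADDR2(addr) = 0x00100001
 * (the 1=valid bit at bit 20, ORed with bits 44-52 of the address).
 */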
6946
6947 #ifdef BCM_CNIC
6948 #define CNIC_ILT_LINES          127
6949 #define CNIC_CTX_PER_ILT        16
6950 #else
6951 #define CNIC_ILT_LINES          0
6952 #endif
6953
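/*
 * Write a single ILT entry: the two 32-bit halves produced by
 * ONCHIP_ADDR1/2 go out as one wide-bus write. The on-chip address
 * table lives at a different register base on E1H than on E1.
 */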
6954 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6955 {
6956         int reg;
6957
6958         if (CHIP_IS_E1H(bp))
6959                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6960         else /* E1 */
6961                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6962
6963         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6964 }
6965
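/*
 * Per-function HW init: enable MSI reconfigure in the HC, program
 * this function's ILT lines (CDU context, plus timers/QM/searcher
 * lines when CNIC is compiled in), run the FUNC-stage init pass and
 * LLH setup on E1H, and clear stale PCIE error status for debug.
 */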
6966 static int bnx2x_init_func(struct bnx2x *bp)
6967 {
6968         int port = BP_PORT(bp);
6969         int func = BP_FUNC(bp);
6970         u32 addr, val;
6971         int i;
6972
6973         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
6974
6975         /* set MSI reconfigure capability */
6976         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6977         val = REG_RD(bp, addr);
6978         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6979         REG_WR(bp, addr, val);
6980
6981         i = FUNC_ILT_BASE(func);
6982
6983         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6984         if (CHIP_IS_E1H(bp)) {
6985                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6986                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6987         } else /* E1 */
6988                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6989                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6990
6991 #ifdef BCM_CNIC
6992         i += 1 + CNIC_ILT_LINES;
6993         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6994         if (CHIP_IS_E1(bp))
6995                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6996         else {
6997                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6998                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6999         }
7000
7001         i++;
7002         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7003         if (CHIP_IS_E1(bp))
7004                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7005         else {
7006                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7007                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7008         }
7009
7010         i++;
7011         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7012         if (CHIP_IS_E1(bp))
7013                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7014         else {
7015                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7016                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7017         }
7018
7019         /* tell the searcher where the T2 table is */
7020         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7021
7022         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7023                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7024
7025         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7026                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7027                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7028
7029         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7030 #endif
7031
7032         if (CHIP_IS_E1H(bp)) {
7033                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7034                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7035                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7036                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7037                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7038                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7039                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7040                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7041                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7042
7043                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7044                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7045         }
7046
7047         /* HC init per function */
7048         if (CHIP_IS_E1H(bp)) {
7049                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7050
7051                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7052                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7053         }
7054         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7055
7056         /* Reset PCIE errors for debug */
7057         REG_WR(bp, 0x2114, 0xffffffff);
7058         REG_WR(bp, 0x2120, 0xffffffff);
7059
7060         return 0;
7061 }
7062
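/*
 * Top-level HW init, driven by the load_code the MCP returned. The
 * switch falls through on purpose: a COMMON load also runs the PORT
 * and FUNCTION stages, and a PORT load also runs the FUNCTION stage.
 * DMAE is marked ready before the port and function stages run.
 */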
7063 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7064 {
7065         int i, rc = 0;
7066
7067         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7068            BP_FUNC(bp), load_code);
7069
7070         bp->dmae_ready = 0;
7071         mutex_init(&bp->dmae_mutex);
7072         rc = bnx2x_gunzip_init(bp);
7073         if (rc)
7074                 return rc;
7075
7076         switch (load_code) {
7077         case FW_MSG_CODE_DRV_LOAD_COMMON:
7078                 rc = bnx2x_init_common(bp);
7079                 if (rc)
7080                         goto init_hw_err;
7081                 /* no break */
7082
7083         case FW_MSG_CODE_DRV_LOAD_PORT:
7084                 bp->dmae_ready = 1;
7085                 rc = bnx2x_init_port(bp);
7086                 if (rc)
7087                         goto init_hw_err;
7088                 /* no break */
7089
7090         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7091                 bp->dmae_ready = 1;
7092                 rc = bnx2x_init_func(bp);
7093                 if (rc)
7094                         goto init_hw_err;
7095                 break;
7096
7097         default:
7098                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7099                 break;
7100         }
7101
7102         if (!BP_NOMCP(bp)) {
7103                 int func = BP_FUNC(bp);
7104
7105                 bp->fw_drv_pulse_wr_seq =
7106                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7107                                  DRV_PULSE_SEQ_MASK);
7108                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7109         }
7110
7111         /* this needs to be done before gunzip end */
7112         bnx2x_zero_def_sb(bp);
7113         for_each_queue(bp, i)
7114                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7115 #ifdef BCM_CNIC
7116         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7117 #endif
7118
7119 init_hw_err:
7120         bnx2x_gunzip_end(bp);
7121
7122         return rc;
7123 }
7124
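/*
 * Release everything bnx2x_alloc_mem() acquired: per-queue status
 * blocks, Rx/Tx/SGE rings, the default status block, slowpath
 * buffers, the CNIC tables when compiled in, and the slowpath ring.
 * Both helper macros check for NULL first, so this is safe to call
 * on a partially allocated bp.
 */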
7125 static void bnx2x_free_mem(struct bnx2x *bp)
7126 {
7127
7128 #define BNX2X_PCI_FREE(x, y, size) \
7129         do { \
7130                 if (x) { \
7131                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7132                         x = NULL; \
7133                         y = 0; \
7134                 } \
7135         } while (0)
7136
7137 #define BNX2X_FREE(x) \
7138         do { \
7139                 if (x) { \
7140                         vfree(x); \
7141                         x = NULL; \
7142                 } \
7143         } while (0)
7144
7145         int i;
7146
7147         /* fastpath */
7148         /* Common */
7149         for_each_queue(bp, i) {
7150
7151                 /* status blocks */
7152                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7153                                bnx2x_fp(bp, i, status_blk_mapping),
7154                                sizeof(struct host_status_block));
7155         }
7156         /* Rx */
7157         for_each_queue(bp, i) {
7158
7159                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7160                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7161                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7162                                bnx2x_fp(bp, i, rx_desc_mapping),
7163                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7164
7165                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7166                                bnx2x_fp(bp, i, rx_comp_mapping),
7167                                sizeof(struct eth_fast_path_rx_cqe) *
7168                                NUM_RCQ_BD);
7169
7170                 /* SGE ring */
7171                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7172                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7173                                bnx2x_fp(bp, i, rx_sge_mapping),
7174                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7175         }
7176         /* Tx */
7177         for_each_queue(bp, i) {
7178
7179                 /* fastpath tx rings: tx_buf tx_desc */
7180                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7181                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7182                                bnx2x_fp(bp, i, tx_desc_mapping),
7183                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7184         }
7185         /* end of fastpath */
7186
7187         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7188                        sizeof(struct host_def_status_block));
7189
7190         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7191                        sizeof(struct bnx2x_slowpath));
7192
7193 #ifdef BCM_CNIC
7194         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7195         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7196         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7197         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7198         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7199                        sizeof(struct host_status_block));
7200 #endif
7201         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7202
7203 #undef BNX2X_PCI_FREE
7204 #undef BNX2X_FREE
7205 }
7206
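/*
 * Allocate all driver memory in one place: DMA-coherent blocks via
 * BNX2X_PCI_ALLOC and vmalloc()ed shadow rings via BNX2X_ALLOC, all
 * zeroed on allocation. Any failure unwinds through bnx2x_free_mem()
 * and returns -ENOMEM.
 */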
7207 static int bnx2x_alloc_mem(struct bnx2x *bp)
7208 {
7209
7210 #define BNX2X_PCI_ALLOC(x, y, size) \
7211         do { \
7212                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7213                 if (x == NULL) \
7214                         goto alloc_mem_err; \
7215                 memset(x, 0, size); \
7216         } while (0)
7217
7218 #define BNX2X_ALLOC(x, size) \
7219         do { \
7220                 x = vmalloc(size); \
7221                 if (x == NULL) \
7222                         goto alloc_mem_err; \
7223                 memset(x, 0, size); \
7224         } while (0)
7225
7226         int i;
7227
7228         /* fastpath */
7229         /* Common */
7230         for_each_queue(bp, i) {
7231                 bnx2x_fp(bp, i, bp) = bp;
7232
7233                 /* status blocks */
7234                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7235                                 &bnx2x_fp(bp, i, status_blk_mapping),
7236                                 sizeof(struct host_status_block));
7237         }
7238         /* Rx */
7239         for_each_queue(bp, i) {
7240
7241                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7242                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7243                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7244                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7245                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7246                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7247
7248                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7249                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7250                                 sizeof(struct eth_fast_path_rx_cqe) *
7251                                 NUM_RCQ_BD);
7252
7253                 /* SGE ring */
7254                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7255                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7256                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7257                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7258                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7259         }
7260         /* Tx */
7261         for_each_queue(bp, i) {
7262
7263                 /* fastpath tx rings: tx_buf tx_desc */
7264                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7265                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7266                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7267                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7268                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7269         }
7270         /* end of fastpath */
7271
7272         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7273                         sizeof(struct host_def_status_block));
7274
7275         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7276                         sizeof(struct bnx2x_slowpath));
7277
7278 #ifdef BCM_CNIC
7279         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7280
7281         /* allocate the searcher T2 table;
7282            we allocate 1/4 of the T1 size for T2
7283            (which is not entered into the ILT) */
7284         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7285
7286         /* Initialize T2 (for 1024 connections) */
7287         for (i = 0; i < 16*1024; i += 64)
7288                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7289
7290         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7291         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7292
7293         /* QM queues (128*MAX_CONN) */
7294         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7295
7296         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7297                         sizeof(struct host_status_block));
7298 #endif
7299
7300         /* Slow path ring */
7301         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7302
7303         return 0;
7304
7305 alloc_mem_err:
7306         bnx2x_free_mem(bp);
7307         return -ENOMEM;
7308
7309 #undef BNX2X_PCI_ALLOC
7310 #undef BNX2X_ALLOC
7311 }
7312
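/*
 * Drop any Tx packets still queued: walk each ring from the packet
 * consumer to the producer index, freeing the BDs and skbs on the way.
 */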
7313 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7314 {
7315         int i;
7316
7317         for_each_queue(bp, i) {
7318                 struct bnx2x_fastpath *fp = &bp->fp[i];
7319
7320                 u16 bd_cons = fp->tx_bd_cons;
7321                 u16 sw_prod = fp->tx_pkt_prod;
7322                 u16 sw_cons = fp->tx_pkt_cons;
7323
7324                 while (sw_cons != sw_prod) {
7325                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7326                         sw_cons++;
7327                 }
7328         }
7329 }
7330
7331 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7332 {
7333         int i, j;
7334
7335         for_each_queue(bp, j) {
7336                 struct bnx2x_fastpath *fp = &bp->fp[j];
7337
7338                 for (i = 0; i < NUM_RX_BD; i++) {
7339                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7340                         struct sk_buff *skb = rx_buf->skb;
7341
7342                         if (skb == NULL)
7343                                 continue;
7344
7345                         dma_unmap_single(&bp->pdev->dev,
7346                                          dma_unmap_addr(rx_buf, mapping),
7347                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7348
7349                         rx_buf->skb = NULL;
7350                         dev_kfree_skb(skb);
7351                 }
7352                 if (!fp->disable_tpa)
7353                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7354                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7355                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7356         }
7357 }
7358
7359 static void bnx2x_free_skbs(struct bnx2x *bp)
7360 {
7361         bnx2x_free_tx_skbs(bp);
7362         bnx2x_free_rx_skbs(bp);
7363 }
7364
7365 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7366 {
7367         int i, offset = 1;
7368
7369         free_irq(bp->msix_table[0].vector, bp->dev);
7370         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7371            bp->msix_table[0].vector);
7372
7373 #ifdef BCM_CNIC
7374         offset++;
7375 #endif
7376         for_each_queue(bp, i) {
7377                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7378                    "state %x\n", i, bp->msix_table[i + offset].vector,
7379                    bnx2x_fp(bp, i, state));
7380
7381                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7382         }
7383 }
7384
7385 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7386 {
7387         if (bp->flags & USING_MSIX_FLAG) {
7388                 if (!disable_only)
7389                         bnx2x_free_msix_irqs(bp);
7390                 pci_disable_msix(bp->pdev);
7391                 bp->flags &= ~USING_MSIX_FLAG;
7392
7393         } else if (bp->flags & USING_MSI_FLAG) {
7394                 if (!disable_only)
7395                         free_irq(bp->pdev->irq, bp->dev);
7396                 pci_disable_msi(bp->pdev);
7397                 bp->flags &= ~USING_MSI_FLAG;
7398
7399         } else if (!disable_only)
7400                 free_irq(bp->pdev->irq, bp->dev);
7401 }
7402
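/*
 * Request one MSI-X vector for the slowpath, one for CNIC when
 * compiled in, and one per fastpath queue. If the kernel can only
 * grant fewer vectors, retry with the granted count and shrink
 * num_queues to fit; give up (and let the caller fall back to
 * MSI/INTx) if even that fails.
 */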
7403 static int bnx2x_enable_msix(struct bnx2x *bp)
7404 {
7405         int i, rc, offset = 1;
7406         int igu_vec = 0;
7407
7408         bp->msix_table[0].entry = igu_vec;
7409         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7410
7411 #ifdef BCM_CNIC
7412         igu_vec = BP_L_ID(bp) + offset;
7413         bp->msix_table[1].entry = igu_vec;
7414         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7415         offset++;
7416 #endif
7417         for_each_queue(bp, i) {
7418                 igu_vec = BP_L_ID(bp) + offset + i;
7419                 bp->msix_table[i + offset].entry = igu_vec;
7420                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7421                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7422         }
7423
7424         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7425                              BNX2X_NUM_QUEUES(bp) + offset);
7426
7427         /*
7428          * reconfigure number of tx/rx queues according to available
7429          * MSI-X vectors
7430          */
7431         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7432                 /* vectors available for FP */
7433                 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7434
7435                 DP(NETIF_MSG_IFUP,
7436                    "Trying to use less MSI-X vectors: %d\n", rc);
7437
7438                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7439
7440                 if (rc) {
7441                         DP(NETIF_MSG_IFUP,
7442                            "MSI-X is not attainable  rc %d\n", rc);
7443                         return rc;
7444                 }
7445
7446                 bp->num_queues = min(bp->num_queues, fp_vec);
7447
7448                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7449                                   bp->num_queues);
7450         } else if (rc) {
7451                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7452                 return rc;
7453         }
7454
7455         bp->flags |= USING_MSIX_FLAG;
7456
7457         return 0;
7458 }
7459
7460 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7461 {
7462         int i, rc, offset = 1;
7463
7464         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7465                          bp->dev->name, bp->dev);
7466         if (rc) {
7467                 BNX2X_ERR("request sp irq failed\n");
7468                 return -EBUSY;
7469         }
7470
7471 #ifdef BCM_CNIC
7472         offset++;
7473 #endif
7474         for_each_queue(bp, i) {
7475                 struct bnx2x_fastpath *fp = &bp->fp[i];
7476                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7477                          bp->dev->name, i);
7478
7479                 rc = request_irq(bp->msix_table[i + offset].vector,
7480                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7481                 if (rc) {
7482                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7483                         bnx2x_free_msix_irqs(bp);
7484                         return -EBUSY;
7485                 }
7486
7487                 fp->state = BNX2X_FP_STATE_IRQ;
7488         }
7489
7490         i = BNX2X_NUM_QUEUES(bp);
7491         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
7492                " ... fp[%d] %d\n",
7493                bp->msix_table[0].vector,
7494                0, bp->msix_table[offset].vector,
7495                i - 1, bp->msix_table[offset + i - 1].vector);
7496
7497         return 0;
7498 }
7499
7500 static int bnx2x_enable_msi(struct bnx2x *bp)
7501 {
7502         int rc;
7503
7504         rc = pci_enable_msi(bp->pdev);
7505         if (rc) {
7506                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7507                 return -1;
7508         }
7509         bp->flags |= USING_MSI_FLAG;
7510
7511         return 0;
7512 }
7513
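/*
 * Request the single MSI or legacy INTx interrupt. IRQF_SHARED is
 * only needed for legacy INTx; an MSI vector is not shared.
 */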
7514 static int bnx2x_req_irq(struct bnx2x *bp)
7515 {
7516         unsigned long flags;
7517         int rc;
7518
7519         if (bp->flags & USING_MSI_FLAG)
7520                 flags = 0;
7521         else
7522                 flags = IRQF_SHARED;
7523
7524         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7525                          bp->dev->name, bp->dev);
7526         if (!rc)
7527                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7528
7529         return rc;
7530 }
7531
7532 static void bnx2x_napi_enable(struct bnx2x *bp)
7533 {
7534         int i;
7535
7536         for_each_queue(bp, i)
7537                 napi_enable(&bnx2x_fp(bp, i, napi));
7538 }
7539
7540 static void bnx2x_napi_disable(struct bnx2x *bp)
7541 {
7542         int i;
7543
7544         for_each_queue(bp, i)
7545                 napi_disable(&bnx2x_fp(bp, i, napi));
7546 }
7547
7548 static void bnx2x_netif_start(struct bnx2x *bp)
7549 {
7550         int intr_sem;
7551
7552         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7553         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7554
7555         if (intr_sem) {
7556                 if (netif_running(bp->dev)) {
7557                         bnx2x_napi_enable(bp);
7558                         bnx2x_int_enable(bp);
7559                         if (bp->state == BNX2X_STATE_OPEN)
7560                                 netif_tx_wake_all_queues(bp->dev);
7561                 }
7562         }
7563 }
7564
7565 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7566 {
7567         bnx2x_int_disable_sync(bp, disable_hw);
7568         bnx2x_napi_disable(bp);
7569         netif_tx_disable(bp->dev);
7570 }
7571
7572 /*
7573  * Init service functions
7574  */
7575
7576 /**
7577  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7578  *
7579  * @param bp driver descriptor
7580  * @param set set or clear an entry (1 or 0)
7581  * @param mac pointer to a buffer containing a MAC
7582  * @param cl_bit_vec bit vector of clients to register a MAC for
7583  * @param cam_offset offset in a CAM to use
7584  * @param with_bcast set broadcast MAC as well
7585  */
7586 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7587                                       u32 cl_bit_vec, u8 cam_offset,
7588                                       u8 with_bcast)
7589 {
7590         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7591         int port = BP_PORT(bp);
7592
7593         /* CAM allocation
7594          * unicasts 0-31:port0 32-63:port1
7595          * multicast 64-127:port0 128-191:port1
7596          */
7597         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7598         config->hdr.offset = cam_offset;
7599         config->hdr.client_id = 0xff;
7600         config->hdr.reserved1 = 0;
7601
7602         /* primary MAC */
7603         config->config_table[0].cam_entry.msb_mac_addr =
7604                                         swab16(*(u16 *)&mac[0]);
7605         config->config_table[0].cam_entry.middle_mac_addr =
7606                                         swab16(*(u16 *)&mac[2]);
7607         config->config_table[0].cam_entry.lsb_mac_addr =
7608                                         swab16(*(u16 *)&mac[4]);
7609         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7610         if (set)
7611                 config->config_table[0].target_table_entry.flags = 0;
7612         else
7613                 CAM_INVALIDATE(config->config_table[0]);
7614         config->config_table[0].target_table_entry.clients_bit_vector =
7615                                                 cpu_to_le32(cl_bit_vec);
7616         config->config_table[0].target_table_entry.vlan_id = 0;
7617
7618         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7619            (set ? "setting" : "clearing"),
7620            config->config_table[0].cam_entry.msb_mac_addr,
7621            config->config_table[0].cam_entry.middle_mac_addr,
7622            config->config_table[0].cam_entry.lsb_mac_addr);
7623
7624         /* broadcast */
7625         if (with_bcast) {
7626                 config->config_table[1].cam_entry.msb_mac_addr =
7627                         cpu_to_le16(0xffff);
7628                 config->config_table[1].cam_entry.middle_mac_addr =
7629                         cpu_to_le16(0xffff);
7630                 config->config_table[1].cam_entry.lsb_mac_addr =
7631                         cpu_to_le16(0xffff);
7632                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7633                 if (set)
7634                         config->config_table[1].target_table_entry.flags =
7635                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7636                 else
7637                         CAM_INVALIDATE(config->config_table[1]);
7638                 config->config_table[1].target_table_entry.clients_bit_vector =
7639                                                         cpu_to_le32(cl_bit_vec);
7640                 config->config_table[1].target_table_entry.vlan_id = 0;
7641         }
7642
7643         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7644                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7645                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7646 }
7647
7648 /**
7649  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7650  *
7651  * @param bp driver descriptor
7652  * @param set set or clear an entry (1 or 0)
7653  * @param mac pointer to a buffer containing a MAC
7654  * @param cl_bit_vec bit vector of clients to register a MAC for
7655  * @param cam_offset offset in a CAM to use
7656  */
7657 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7658                                        u32 cl_bit_vec, u8 cam_offset)
7659 {
7660         struct mac_configuration_cmd_e1h *config =
7661                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7662
7663         config->hdr.length = 1;
7664         config->hdr.offset = cam_offset;
7665         config->hdr.client_id = 0xff;
7666         config->hdr.reserved1 = 0;
7667
7668         /* primary MAC */
7669         config->config_table[0].msb_mac_addr =
7670                                         swab16(*(u16 *)&mac[0]);
7671         config->config_table[0].middle_mac_addr =
7672                                         swab16(*(u16 *)&mac[2]);
7673         config->config_table[0].lsb_mac_addr =
7674                                         swab16(*(u16 *)&mac[4]);
7675         config->config_table[0].clients_bit_vector =
7676                                         cpu_to_le32(cl_bit_vec);
7677         config->config_table[0].vlan_id = 0;
7678         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7679         if (set)
7680                 config->config_table[0].flags = BP_PORT(bp);
7681         else
7682                 config->config_table[0].flags =
7683                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7684
7685         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7686            (set ? "setting" : "clearing"),
7687            config->config_table[0].msb_mac_addr,
7688            config->config_table[0].middle_mac_addr,
7689            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7690
7691         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7692                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7693                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7694 }
7695
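/*
 * Wait (or, with poll set, actively service the Rx completion ring)
 * for *state_p to reach the expected state; the state itself is
 * advanced by bnx2x_sp_event() when the ramrod completion arrives.
 * Gives up after roughly five seconds and returns -EBUSY.
 */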
7696 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7697                              int *state_p, int poll)
7698 {
7699         /* can take a while if any port is running */
7700         int cnt = 5000;
7701
7702         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7703            poll ? "polling" : "waiting", state, idx);
7704
7705         might_sleep();
7706         while (cnt--) {
7707                 if (poll) {
7708                         bnx2x_rx_int(bp->fp, 10);
7709                         /* if the index differs from 0,
7710                          * the reply for some commands will
7711                          * arrive on the non-default queue
7712                          */
7713                         if (idx)
7714                                 bnx2x_rx_int(&bp->fp[idx], 10);
7715                 }
7716
7717                 mb(); /* state is changed by bnx2x_sp_event() */
7718                 if (*state_p == state) {
7719 #ifdef BNX2X_STOP_ON_ERROR
7720                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7721 #endif
7722                         return 0;
7723                 }
7724
7725                 msleep(1);
7726
7727                 if (bp->panic)
7728                         return -EIO;
7729         }
7730
7731         /* timeout! */
7732         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7733                   poll ? "polling" : "waiting", state, idx);
7734 #ifdef BNX2X_STOP_ON_ERROR
7735         bnx2x_panic();
7736 #endif
7737
7738         return -EBUSY;
7739 }
7740
7741 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7742 {
7743         bp->set_mac_pending++;
7744         smp_wmb();
7745
7746         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7747                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7748
7749         /* Wait for a completion */
7750         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7751 }
7752
7753 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7754 {
7755         bp->set_mac_pending++;
7756         smp_wmb();
7757
7758         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7759                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7760                                   1);
7761
7762         /* Wait for a completion */
7763         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7764 }
7765
7766 #ifdef BCM_CNIC
7767 /**
7768  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7769  * MAC(s). This function will wait until the ramrod completion
7770  * returns.
7771  *
7772  * @param bp driver handle
7773  * @param set set or clear the CAM entry
7774  *
7775  * @return 0 on success, -ENODEV if the ramrod doesn't return.
7776  */
7777 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7778 {
7779         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7780
7781         bp->set_mac_pending++;
7782         smp_wmb();
7783
7784         /* Send a SET_MAC ramrod */
7785         if (CHIP_IS_E1(bp))
7786                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7787                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7788                                   1);
7789         else
7790                 /* CAM allocation for E1H
7791                  * unicasts: by func number
7792                  * multicast: 20+FUNC*20, 20 each
7793                  */
7794                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7795                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7796
7797         /* Wait for a completion when setting */
7798         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7799
7800         return 0;
7801 }
7802 #endif
7803
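/*
 * Bring up the leading (default) connection: re-arm the IGU for the
 * first status block, post the PORT_SETUP ramrod and wait for the
 * driver state to move to BNX2X_STATE_OPEN.
 */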
7804 static int bnx2x_setup_leading(struct bnx2x *bp)
7805 {
7806         int rc;
7807
7808         /* reset IGU state */
7809         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7810
7811         /* SETUP ramrod */
7812         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7813
7814         /* Wait for completion */
7815         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7816
7817         return rc;
7818 }
7819
7820 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7821 {
7822         struct bnx2x_fastpath *fp = &bp->fp[index];
7823
7824         /* reset IGU state */
7825         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7826
7827         /* SETUP ramrod */
7828         fp->state = BNX2X_FP_STATE_OPENING;
7829         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7830                       fp->cl_id, 0);
7831
7832         /* Wait for completion */
7833         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7834                                  &(fp->state), 0);
7835 }
7836
7837 static int bnx2x_poll(struct napi_struct *napi, int budget);
7838
7839 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7840 {
7841
7842         switch (bp->multi_mode) {
7843         case ETH_RSS_MODE_DISABLED:
7844                 bp->num_queues = 1;
7845                 break;
7846
7847         case ETH_RSS_MODE_REGULAR:
7848                 if (num_queues)
7849                         bp->num_queues = min_t(u32, num_queues,
7850                                                   BNX2X_MAX_QUEUES(bp));
7851                 else
7852                         bp->num_queues = min_t(u32, num_online_cpus(),
7853                                                   BNX2X_MAX_QUEUES(bp));
7854                 break;
7855
7857         default:
7858                 bp->num_queues = 1;
7859                 break;
7860         }
7861 }
7862
7863 static int bnx2x_set_num_queues(struct bnx2x *bp)
7864 {
7865         int rc = 0;
7866
7867         switch (int_mode) {
7868         case INT_MODE_INTx:
7869         case INT_MODE_MSI:
7870                 bp->num_queues = 1;
7871                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7872                 break;
7873         default:
7874                 /* Set number of queues according to bp->multi_mode value */
7875                 bnx2x_set_num_queues_msix(bp);
7876
7877                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7878                    bp->num_queues);
7879
7880                 /* if we can't use MSI-X we only need one fp,
7881                  * so try to enable MSI-X with the requested number of fp's
7882                  * and fallback to MSI or legacy INTx with one fp
7883                  */
7884                 rc = bnx2x_enable_msix(bp);
7885                 if (rc)
7886                         /* failed to enable MSI-X */
7887                         bp->num_queues = 1;
7888                 break;
7889         }
7890         bp->dev->real_num_tx_queues = bp->num_queues;
7891         return rc;
7892 }
7893
7894 #ifdef BCM_CNIC
7895 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7896 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7897 #endif
7898
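/*
 * Load path in brief: choose the queue/interrupt layout, allocate
 * memory, attach NAPI and IRQs, negotiate the load type with the MCP
 * (or derive it from the load counters when no MCP is present),
 * initialize the HW and NIC internals, open the leading and
 * non-default connections, program the MAC(s), and finally start the
 * fast path according to the requested load_mode.
 */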
7899 /* must be called with rtnl_lock */
7900 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7901 {
7902         u32 load_code;
7903         int i, rc;
7904
7905 #ifdef BNX2X_STOP_ON_ERROR
7906         if (unlikely(bp->panic))
7907                 return -EPERM;
7908 #endif
7909
7910         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7911
7912         rc = bnx2x_set_num_queues(bp);
7913
7914         if (bnx2x_alloc_mem(bp)) {
7915                 bnx2x_free_irq(bp, true);
7916                 return -ENOMEM;
7917         }
7918
7919         for_each_queue(bp, i)
7920                 bnx2x_fp(bp, i, disable_tpa) =
7921                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7922
7923         for_each_queue(bp, i)
7924                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7925                                bnx2x_poll, 128);
7926
7927         bnx2x_napi_enable(bp);
7928
7929         if (bp->flags & USING_MSIX_FLAG) {
7930                 rc = bnx2x_req_msix_irqs(bp);
7931                 if (rc) {
7932                         bnx2x_free_irq(bp, true);
7933                         goto load_error1;
7934                 }
7935         } else {
7936                 /* Fall back to INTx if we failed to enable MSI-X due to
7937                    lack of memory (in bnx2x_set_num_queues()) */
7938                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7939                         bnx2x_enable_msi(bp);
7940                 bnx2x_ack_int(bp);
7941                 rc = bnx2x_req_irq(bp);
7942                 if (rc) {
7943                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7944                         bnx2x_free_irq(bp, true);
7945                         goto load_error1;
7946                 }
7947                 if (bp->flags & USING_MSI_FLAG) {
7948                         bp->dev->irq = bp->pdev->irq;
7949                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7950                                     bp->pdev->irq);
7951                 }
7952         }
7953
7954         /* Send the LOAD_REQUEST command to the MCP.
7955            The response indicates the type of LOAD command:
7956            if this is the first port to be initialized,
7957            the common blocks should be initialized as well; otherwise not
7958         */
7959         if (!BP_NOMCP(bp)) {
7960                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7961                 if (!load_code) {
7962                         BNX2X_ERR("MCP response failure, aborting\n");
7963                         rc = -EBUSY;
7964                         goto load_error2;
7965                 }
7966                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7967                         rc = -EBUSY; /* other port in diagnostic mode */
7968                         goto load_error2;
7969                 }
7970
7971         } else {
7972                 int port = BP_PORT(bp);
7973
7974                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7975                    load_count[0], load_count[1], load_count[2]);
7976                 load_count[0]++;
7977                 load_count[1 + port]++;
7978                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7979                    load_count[0], load_count[1], load_count[2]);
7980                 if (load_count[0] == 1)
7981                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7982                 else if (load_count[1 + port] == 1)
7983                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7984                 else
7985                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7986         }
7987
7988         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7989             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7990                 bp->port.pmf = 1;
7991         else
7992                 bp->port.pmf = 0;
7993         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7994
7995         /* Initialize HW */
7996         rc = bnx2x_init_hw(bp, load_code);
7997         if (rc) {
7998                 BNX2X_ERR("HW init failed, aborting\n");
7999                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8000                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8001                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8002                 goto load_error2;
8003         }
8004
8005         /* Setup NIC internals and enable interrupts */
8006         bnx2x_nic_init(bp, load_code);
8007
8008         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8009             (bp->common.shmem2_base))
8010                 SHMEM2_WR(bp, dcc_support,
8011                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8012                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8013
8014         /* Send LOAD_DONE command to MCP */
8015         if (!BP_NOMCP(bp)) {
8016                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8017                 if (!load_code) {
8018                         BNX2X_ERR("MCP response failure, aborting\n");
8019                         rc = -EBUSY;
8020                         goto load_error3;
8021                 }
8022         }
8023
8024         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8025
8026         rc = bnx2x_setup_leading(bp);
8027         if (rc) {
8028                 BNX2X_ERR("Setup leading failed!\n");
8029 #ifndef BNX2X_STOP_ON_ERROR
8030                 goto load_error3;
8031 #else
8032                 bp->panic = 1;
8033                 return -EBUSY;
8034 #endif
8035         }
8036
8037         if (CHIP_IS_E1H(bp))
8038                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8039                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8040                         bp->flags |= MF_FUNC_DIS;
8041                 }
8042
8043         if (bp->state == BNX2X_STATE_OPEN) {
8044 #ifdef BCM_CNIC
8045                 /* Enable Timer scan */
8046                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8047 #endif
8048                 for_each_nondefault_queue(bp, i) {
8049                         rc = bnx2x_setup_multi(bp, i);
8050                         if (rc)
8051 #ifdef BCM_CNIC
8052                                 goto load_error4;
8053 #else
8054                                 goto load_error3;
8055 #endif
8056                 }
8057
8058                 if (CHIP_IS_E1(bp))
8059                         bnx2x_set_eth_mac_addr_e1(bp, 1);
8060                 else
8061                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
8062 #ifdef BCM_CNIC
8063                 /* Set iSCSI L2 MAC */
8064                 mutex_lock(&bp->cnic_mutex);
8065                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8066                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8067                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8068                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8069                                       CNIC_SB_ID(bp));
8070                 }
8071                 mutex_unlock(&bp->cnic_mutex);
8072 #endif
8073         }
8074
8075         if (bp->port.pmf)
8076                 bnx2x_initial_phy_init(bp, load_mode);
8077
8078         /* Start fast path */
8079         switch (load_mode) {
8080         case LOAD_NORMAL:
8081                 if (bp->state == BNX2X_STATE_OPEN) {
8082                         /* Tx queues should only be re-enabled */
8083                         netif_tx_wake_all_queues(bp->dev);
8084                 }
8085                 /* Initialize the receive filter. */
8086                 bnx2x_set_rx_mode(bp->dev);
8087                 break;
8088
8089         case LOAD_OPEN:
8090                 netif_tx_start_all_queues(bp->dev);
8091                 if (bp->state != BNX2X_STATE_OPEN)
8092                         netif_tx_disable(bp->dev);
8093                 /* Initialize the receive filter. */
8094                 bnx2x_set_rx_mode(bp->dev);
8095                 break;
8096
8097         case LOAD_DIAG:
8098                 /* Initialize the receive filter. */
8099                 bnx2x_set_rx_mode(bp->dev);
8100                 bp->state = BNX2X_STATE_DIAG;
8101                 break;
8102
8103         default:
8104                 break;
8105         }
8106
8107         if (!bp->port.pmf)
8108                 bnx2x__link_status_update(bp);
8109
8110         /* start the timer */
8111         mod_timer(&bp->timer, jiffies + bp->current_interval);
8112
8113 #ifdef BCM_CNIC
8114         bnx2x_setup_cnic_irq_info(bp);
8115         if (bp->state == BNX2X_STATE_OPEN)
8116                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8117 #endif
8118         bnx2x_inc_load_cnt(bp);
8119
8120         return 0;
8121
8122 #ifdef BCM_CNIC
8123 load_error4:
8124         /* Disable Timer scan */
8125         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8126 #endif
8127 load_error3:
8128         bnx2x_int_disable_sync(bp, 1);
8129         if (!BP_NOMCP(bp)) {
8130                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8131                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8132         }
8133         bp->port.pmf = 0;
8134         /* Free SKBs, SGEs, TPA pool and driver internals */
8135         bnx2x_free_skbs(bp);
8136         for_each_queue(bp, i)
8137                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8138 load_error2:
8139         /* Release IRQs */
8140         bnx2x_free_irq(bp, false);
8141 load_error1:
8142         bnx2x_napi_disable(bp);
8143         for_each_queue(bp, i)
8144                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8145         bnx2x_free_mem(bp);
8146
8147         return rc;
8148 }
8149
8150 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8151 {
8152         struct bnx2x_fastpath *fp = &bp->fp[index];
8153         int rc;
8154
8155         /* halt the connection */
8156         fp->state = BNX2X_FP_STATE_HALTING;
8157         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8158
8159         /* Wait for completion */
8160         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8161                                &(fp->state), 1);
8162         if (rc) /* timeout */
8163                 return rc;
8164
8165         /* delete cfc entry */
8166         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8167
8168         /* Wait for completion */
8169         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8170                                &(fp->state), 1);
8171         return rc;
8172 }
8173
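/*
 * The leading connection is halted like any other, but the final
 * PORT_DELETE ramrod completes on the default status block rather
 * than through a fastpath state change, so completion is detected by
 * polling the default SB slow-path producer (dsb_sp_prod) for an
 * update.
 */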
8174 static int bnx2x_stop_leading(struct bnx2x *bp)
8175 {
8176         __le16 dsb_sp_prod_idx;
8177         /* if the other port is handling traffic,
8178            this can take a lot of time */
8179         int cnt = 500;
8180         int rc;
8181
8182         might_sleep();
8183
8184         /* Send HALT ramrod */
8185         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8186         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8187
8188         /* Wait for completion */
8189         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8190                                &(bp->fp[0].state), 1);
8191         if (rc) /* timeout */
8192                 return rc;
8193
8194         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8195
8196         /* Send PORT_DELETE ramrod */
8197         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8198
8199         /* Wait for the completion to arrive on the default status block.
8200            We are going to reset the chip anyway, so there is not much
8201            to do if this times out.
8202          */
8203         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8204                 if (!cnt) {
8205                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8206                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8207                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
8208 #ifdef BNX2X_STOP_ON_ERROR
8209                         bnx2x_panic();
8210 #endif
8211                         rc = -EBUSY;
8212                         break;
8213                 }
8214                 cnt--;
8215                 msleep(1);
8216                 rmb(); /* Refresh the dsb_sp_prod */
8217         }
8218         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8219         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8220
8221         return rc;
8222 }
8223
8224 static void bnx2x_reset_func(struct bnx2x *bp)
8225 {
8226         int port = BP_PORT(bp);
8227         int func = BP_FUNC(bp);
8228         int base, i;
8229
8230         /* Configure IGU */
8231         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8232         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8233
8234 #ifdef BCM_CNIC
8235         /* Disable Timer scan */
8236         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8237         /*
8238          * Wait for at least 10ms and up to 2 seconds for the timers scan to
8239          * complete
8240          */
8241         for (i = 0; i < 200; i++) {
8242                 msleep(10);
8243                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8244                         break;
8245         }
8246 #endif
8247         /* Clear ILT */
8248         base = FUNC_ILT_BASE(func);
8249         for (i = base; i < base + ILT_PER_FUNC; i++)
8250                 bnx2x_ilt_wr(bp, i, 0);
8251 }
8252
8253 static void bnx2x_reset_port(struct bnx2x *bp)
8254 {
8255         int port = BP_PORT(bp);
8256         u32 val;
8257
8258         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8259
8260         /* Do not rcv packets to BRB */
8261         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8262         /* Do not direct rcv packets that are not for MCP to the BRB */
8263         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8264                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8265
8266         /* Configure AEU */
8267         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8268
8269         msleep(100);
8270         /* Check for BRB port occupancy */
8271         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8272         if (val)
8273                 DP(NETIF_MSG_IFDOWN,
8274                    "BRB1 is not empty, %d blocks are occupied\n", val);
8275
8276         /* TODO: Close Doorbell port? */
8277 }
8278
8279 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8280 {
8281         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8282            BP_FUNC(bp), reset_code);
8283
8284         switch (reset_code) {
8285         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8286                 bnx2x_reset_port(bp);
8287                 bnx2x_reset_func(bp);
8288                 bnx2x_reset_common(bp);
8289                 break;
8290
8291         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8292                 bnx2x_reset_port(bp);
8293                 bnx2x_reset_func(bp);
8294                 break;
8295
8296         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8297                 bnx2x_reset_func(bp);
8298                 break;
8299
8300         default:
8301                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8302                 break;
8303         }
8304 }
8305
8306 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8307 {
8308         int port = BP_PORT(bp);
8309         u32 reset_code = 0;
8310         int i, cnt, rc;
8311
8312         /* Wait until tx fastpath tasks complete */
8313         for_each_queue(bp, i) {
8314                 struct bnx2x_fastpath *fp = &bp->fp[i];
8315
8316                 cnt = 1000;
8317                 while (bnx2x_has_tx_work_unload(fp)) {
8318
8319                         bnx2x_tx_int(fp);
8320                         if (!cnt) {
8321                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
8322                                           i);
8323 #ifdef BNX2X_STOP_ON_ERROR
8324                                 bnx2x_panic();
8325                                 return; /* void function - cannot return -EBUSY */
8326 #else
8327                                 break;
8328 #endif
8329                         }
8330                         cnt--;
8331                         msleep(1);
8332                 }
8333         }
8334         /* Give HW time to discard old tx messages */
8335         msleep(1);
8336
8337         if (CHIP_IS_E1(bp)) {
8338                 struct mac_configuration_cmd *config =
8339                                                 bnx2x_sp(bp, mcast_config);
8340
8341                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8342
8343                 for (i = 0; i < config->hdr.length; i++)
8344                         CAM_INVALIDATE(config->config_table[i]);
8345
8346                 config->hdr.length = i;
8347                 if (CHIP_REV_IS_SLOW(bp))
8348                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8349                 else
8350                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8351                 config->hdr.client_id = bp->fp->cl_id;
8352                 config->hdr.reserved1 = 0;
8353
8354                 bp->set_mac_pending++;
8355                 smp_wmb();
8356
8357                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8358                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8359                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8360
8361         } else { /* E1H */
8362                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8363
8364                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8365
8366                 for (i = 0; i < MC_HASH_SIZE; i++)
8367                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8368
8369                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8370         }
8371 #ifdef BCM_CNIC
8372         /* Clear iSCSI L2 MAC */
8373         mutex_lock(&bp->cnic_mutex);
8374         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8375                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8376                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8377         }
8378         mutex_unlock(&bp->cnic_mutex);
8379 #endif
8380
8381         if (unload_mode == UNLOAD_NORMAL)
8382                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8383
8384         else if (bp->flags & NO_WOL_FLAG)
8385                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8386
8387         else if (bp->wol) {
8388                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8389                 u8 *mac_addr = bp->dev->dev_addr;
8390                 u32 val;
8391                 /* The MAC address is written to entries 1-4 to
8392                    preserve entry 0, which is used by the PMF */
8393                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8394
8395                 val = (mac_addr[0] << 8) | mac_addr[1];
8396                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8397
8398                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8399                       (mac_addr[4] << 8) | mac_addr[5];
8400                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8401
8402                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8403
8404         } else
8405                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8406
8407         /* Close multi and leading connections.
8408            Completions for ramrods are collected in a synchronous way */
8409         for_each_nondefault_queue(bp, i)
8410                 if (bnx2x_stop_multi(bp, i))
8411                         goto unload_error;
8412
8413         rc = bnx2x_stop_leading(bp);
8414         if (rc) {
8415                 BNX2X_ERR("Stop leading failed!\n");
8416 #ifdef BNX2X_STOP_ON_ERROR
8417                 return; /* void function - cannot return -EBUSY */
8418 #else
8419                 goto unload_error;
8420 #endif
8421         }
8422
8423 unload_error:
8424         if (!BP_NOMCP(bp))
8425                 reset_code = bnx2x_fw_command(bp, reset_code);
8426         else {
8427                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8428                    load_count[0], load_count[1], load_count[2]);
8429                 load_count[0]--;
8430                 load_count[1 + port]--;
8431                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8432                    load_count[0], load_count[1], load_count[2]);
8433                 if (load_count[0] == 0)
8434                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8435                 else if (load_count[1 + port] == 0)
8436                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8437                 else
8438                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8439         }
8440
8441         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8442             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8443                 bnx2x__link_reset(bp);
8444
8445         /* Reset the chip */
8446         bnx2x_reset_chip(bp, reset_code);
8447
8448         /* Report UNLOAD_DONE to MCP */
8449         if (!BP_NOMCP(bp))
8450                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8451
8452 }
8453
8454 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8455 {
8456         u32 val;
8457
8458         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8459
8460         if (CHIP_IS_E1(bp)) {
8461                 int port = BP_PORT(bp);
8462                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8463                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8464
8465                 val = REG_RD(bp, addr);
8466                 val &= ~(0x300);
8467                 REG_WR(bp, addr, val);
8468         } else if (CHIP_IS_E1H(bp)) {
8469                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8470                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8471                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8472                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8473         }
8474 }
8475
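/*
 * Unload ordering: traffic is first dropped at the storm rx filters,
 * then interrupts/NAPI/Tx are stopped and IRQs released, and only
 * afterwards is the chip cleaned up and driver memory freed.  In
 * UNLOAD_RECOVERY mode the chip cleanup is skipped, since the
 * hardware may be in an unusable state after a parity error.
 */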
8476 /* must be called with rtnl_lock */
8477 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8478 {
8479         int i;
8480
8481         if (bp->state == BNX2X_STATE_CLOSED) {
8482                 /* Interface has been removed - nothing to recover */
8483                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8484                 bp->is_leader = 0;
8485                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8486                 smp_wmb();
8487
8488                 return -EINVAL;
8489         }
8490
8491 #ifdef BCM_CNIC
8492         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8493 #endif
8494         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8495
8496         /* Set "drop all" */
8497         bp->rx_mode = BNX2X_RX_MODE_NONE;
8498         bnx2x_set_storm_rx_mode(bp);
8499
8500         /* Disable HW interrupts, NAPI and Tx */
8501         bnx2x_netif_stop(bp, 1);
8502         netif_carrier_off(bp->dev);
8503
8504         del_timer_sync(&bp->timer);
8505         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8506                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8507         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8508
8509         /* Release IRQs */
8510         bnx2x_free_irq(bp, false);
8511
8512         /* Cleanup the chip if needed */
8513         if (unload_mode != UNLOAD_RECOVERY)
8514                 bnx2x_chip_cleanup(bp, unload_mode);
8515
8516         bp->port.pmf = 0;
8517
8518         /* Free SKBs, SGEs, TPA pool and driver internals */
8519         bnx2x_free_skbs(bp);
8520         for_each_queue(bp, i)
8521                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8522         for_each_queue(bp, i)
8523                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8524         bnx2x_free_mem(bp);
8525
8526         bp->state = BNX2X_STATE_CLOSED;
8527
8528         /* The last driver must disable "close the gates" if there is no
8529          * parity attention or "process kill" pending.
8530          */
8531         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8532             bnx2x_reset_is_done(bp))
8533                 bnx2x_disable_close_the_gate(bp);
8534
8535         /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8536         if (unload_mode == UNLOAD_RECOVERY)
8537                 bp->fw_seq = 0;
8538
8539         return 0;
8540 }
8541
8542 /* Close gates #2, #3 and #4: */
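/* Gate #4 makes the PXP discard host doorbells and gate #2 makes it
 * discard internal writes (both exist only on non-E1 chips), while
 * gate #3 toggles bit 0 of the port's HC config register; together
 * they isolate the chip from further host activity during a
 * "process kill" reset.
 */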
8543 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8544 {
8545         u32 val, addr;
8546
8547         /* Gates #2 and #4a are closed/opened for "not E1" only */
8548         if (!CHIP_IS_E1(bp)) {
8549                 /* #4 */
8550                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8551                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8552                        close ? (val | 0x1) : (val & (~(u32)1)));
8553                 /* #2 */
8554                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8555                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8556                        close ? (val | 0x1) : (val & (~(u32)1)));
8557         }
8558
8559         /* #3 */
8560         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8561         val = REG_RD(bp, addr);
8562         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8563
8564         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8565                 close ? "closing" : "opening");
8566         mmiowb();
8567 }
8568
8569 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8570
8571 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8572 {
8573         /* Do some magic... */
8574         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8575         *magic_val = val & SHARED_MF_CLP_MAGIC;
8576         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8577 }
8578
8579 /* Restore the value of the `magic' bit.
8580  *
8581  * @param bp Driver handle.
8582  * @param magic_val Old value of the `magic' bit.
8583  */
8584 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8585 {
8586         /* Restore the `magic' bit value... */
8590         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8591         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8592                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8593 }
8594
8595 /* Prepares for MCP reset: takes care of CLP configurations.
8596  *
8597  * @param bp
8598  * @param magic_val Old value of 'magic' bit.
8599  */
8600 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8601 {
8602         u32 shmem;
8603         u32 validity_offset;
8604
8605         DP(NETIF_MSG_HW, "Starting\n");
8606
8607         /* Set `magic' bit in order to save MF config */
8608         if (!CHIP_IS_E1(bp))
8609                 bnx2x_clp_reset_prep(bp, magic_val);
8610
8611         /* Get shmem offset */
8612         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8613         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8614
8615         /* Clear validity map flags */
8616         if (shmem > 0)
8617                 REG_WR(bp, shmem + validity_offset, 0);
8618 }
8619
8620 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8621 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
8622
8623 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8624  * depending on the HW type.
8625  *
8626  * @param bp
8627  */
8628 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8629 {
8630         /* special handling for emulation and FPGA,
8631            wait 10 times longer */
8632         if (CHIP_REV_IS_SLOW(bp))
8633                 msleep(MCP_ONE_TIMEOUT*10);
8634         else
8635                 msleep(MCP_ONE_TIMEOUT);
8636 }
8637
8638 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8639 {
8640         u32 shmem, cnt, validity_offset, val;
8641         int rc = 0;
8642
8643         msleep(100);
8644
8645         /* Get shmem offset */
8646         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8647         if (shmem == 0) {
8648                 BNX2X_ERR("Shmem 0 return failure\n");
8649                 rc = -ENOTTY;
8650                 goto exit_lbl;
8651         }
8652
8653         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8654
8655         /* Wait for MCP to come up */
8656         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8657                 /* TBD: it's best to check the validity map of the last
8658                  * port; currently this checks port 0.
8659                  */
8660                 val = REG_RD(bp, shmem + validity_offset);
8661                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8662                    shmem + validity_offset, val);
8663
8664                 /* check that shared memory is valid. */
8665                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8666                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8667                         break;
8668
8669                 bnx2x_mcp_wait_one(bp);
8670         }
8671
8672         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8673
8674         /* Check that shared memory is valid. This indicates that MCP is up. */
8675         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8676             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8677                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8678                 rc = -ENOTTY;
8679                 goto exit_lbl;
8680         }
8681
8682 exit_lbl:
8683         /* Restore the `magic' bit value */
8684         if (!CHIP_IS_E1(bp))
8685                 bnx2x_clp_reset_done(bp, magic_val);
8686
8687         return rc;
8688 }
8689
8690 static void bnx2x_pxp_prep(struct bnx2x *bp)
8691 {
8692         if (!CHIP_IS_E1(bp)) {
8693                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8694                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8695                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8696                 mmiowb();
8697         }
8698 }
8699
8700 /*
8701  * Reset the whole chip except for:
8702  *      - PCIE core
8703  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8704  *              one reset bit)
8705  *      - IGU
8706  *      - MISC (including AEU)
8707  *      - GRC
8708  *      - RBCN, RBCP
8709  */
8710 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8711 {
8712         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8713
8714         not_reset_mask1 =
8715                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8716                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8717                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8718
8719         not_reset_mask2 =
8720                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8721                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8722                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8723                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8724                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8725                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8726                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8727                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8728
8729         reset_mask1 = 0xffffffff;
8730
8731         if (CHIP_IS_E1(bp))
8732                 reset_mask2 = 0xffff;
8733         else
8734                 reset_mask2 = 0x1ffff;
8735
8736         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8737                reset_mask1 & (~not_reset_mask1));
8738         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8739                reset_mask2 & (~not_reset_mask2));
8740
8741         barrier();
8742         mmiowb();
8743
8744         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8745         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8746         mmiowb();
8747 }
8748
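/*
 * "Process kill" sequence: drain the PXP Tetris buffer (waiting up to
 * 1s), close gates #2-#4, clear the "unprepared" bit, prepare the MCP
 * (save the CLP magic, clear the shmem validity map) and the PXP,
 * issue the chip-wide reset, then wait for the MCP to come back up
 * and re-open the gates.
 */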
8749 static int bnx2x_process_kill(struct bnx2x *bp)
8750 {
8751         int cnt = 1000;
8752         u32 val = 0;
8753         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8754
8755
8756         /* Empty the Tetris buffer, wait for 1s */
8757         do {
8758                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8759                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8760                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8761                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8762                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8763                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8764                     ((port_is_idle_0 & 0x1) == 0x1) &&
8765                     ((port_is_idle_1 & 0x1) == 0x1) &&
8766                     (pgl_exp_rom2 == 0xffffffff))
8767                         break;
8768                 msleep(1);
8769         } while (cnt-- > 0);
8770
8771         if (cnt <= 0) {
8772                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8773                           " are still outstanding read requests after 1s!\n");
8775                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8776                           " port_is_idle_0=0x%08x,"
8777                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8778                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8779                           pgl_exp_rom2);
8780                 return -EAGAIN;
8781         }
8782
8783         barrier();
8784
8785         /* Close gates #2, #3 and #4 */
8786         bnx2x_set_234_gates(bp, true);
8787
8788         /* TBD: Indicate that "process kill" is in progress to MCP */
8789
8790         /* Clear "unprepared" bit */
8791         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8792         barrier();
8793
8794         /* Make sure all is written to the chip before the reset */
8795         mmiowb();
8796
8797         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8798          * PSWHST, GRC and PSWRD Tetris buffer.
8799          */
8800         msleep(1);
8801
8802         /* Prepare to chip reset: */
8803         /* MCP */
8804         bnx2x_reset_mcp_prep(bp, &val);
8805
8806         /* PXP */
8807         bnx2x_pxp_prep(bp);
8808         barrier();
8809
8810         /* reset the chip */
8811         bnx2x_process_kill_chip_reset(bp);
8812         barrier();
8813
8814         /* Recover after reset: */
8815         /* MCP */
8816         if (bnx2x_reset_mcp_comp(bp, val))
8817                 return -EAGAIN;
8818
8819         /* PXP */
8820         bnx2x_pxp_prep(bp);
8821
8822         /* Open the gates #2, #3 and #4 */
8823         bnx2x_set_234_gates(bp, false);
8824
8825         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8826          * reset state, re-enable attentions. */
8827
8828         return 0;
8829 }
8830
8831 static int bnx2x_leader_reset(struct bnx2x *bp)
8832 {
8833         int rc = 0;
8834         /* Try to recover after the failure */
8835         if (bnx2x_process_kill(bp)) {
8836                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
8837                        bp->dev->name);
8838                 rc = -EAGAIN;
8839                 goto exit_leader_reset;
8840         }
8841
8842         /* Clear "reset is in progress" bit and update the driver state */
8843         bnx2x_set_reset_done(bp);
8844         bp->recovery_state = BNX2X_RECOVERY_DONE;
8845
8846 exit_leader_reset:
8847         bp->is_leader = 0;
8848         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8849         smp_wmb();
8850         return rc;
8851 }
8852
8853 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8854
8855 /* Assumption: runs under rtnl lock. This together with the fact
8856  * that it's called only from bnx2x_reset_task() ensures that it
8857  * will never be called when netif_running(bp->dev) is false.
8858  */
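/*
 * Recovery state machine: in BNX2X_RECOVERY_INIT each function tries
 * to take the leader lock and unloads itself; in BNX2X_RECOVERY_WAIT
 * the leader re-schedules itself until the global load count drops to
 * zero and then performs the "process kill" and reloads, while a
 * non-leader either waits for the reset to complete, inherits
 * leadership if the lock becomes free, or reloads once the leader is
 * done.
 */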
8859 static void bnx2x_parity_recover(struct bnx2x *bp)
8860 {
8861         DP(NETIF_MSG_HW, "Handling parity\n");
8862         while (1) {
8863                 switch (bp->recovery_state) {
8864                 case BNX2X_RECOVERY_INIT:
8865                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8866                         /* Try to get a LEADER_LOCK HW lock */
8867                         if (bnx2x_trylock_hw_lock(bp,
8868                                 HW_LOCK_RESOURCE_RESERVED_08))
8869                                 bp->is_leader = 1;
8870
8871                         /* Stop the driver */
8872                         /* If interface has been removed - break */
8873                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8874                                 return;
8875
8876                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8877                         /* Ensure "is_leader" and "recovery_state"
8878                          *  update values are seen on other CPUs
8879                          */
8880                         smp_wmb();
8881                         break;
8882
8883                 case BNX2X_RECOVERY_WAIT:
8884                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8885                         if (bp->is_leader) {
8886                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8887                                 if (load_counter) {
8888                                         /* Wait until all other functions get
8889                                          * down.
8890                                          */
8891                                         schedule_delayed_work(&bp->reset_task,
8892                                                                 HZ/10);
8893                                         return;
8894                                 } else {
8895                                         /* If all other functions got down -
8896                                          * try to bring the chip back to
8897                                          * normal. In any case it's an exit
8898                                          * point for a leader.
8899                                          */
8900                                         if (bnx2x_leader_reset(bp) ||
8901                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
8902                                                 printk(KERN_ERR"%s: Recovery "
8903                                                 "has failed. Power cycle is "
8904                                                 "needed.\n", bp->dev->name);
8905                                                 /* Disconnect this device */
8906                                                 netif_device_detach(bp->dev);
8907                                                 /* Block ifup for all function
8908                                                  * of this ASIC until
8909                                                  * "process kill" or power
8910                                                  * cycle.
8911                                                  */
8912                                                 bnx2x_set_reset_in_progress(bp);
8913                                                 /* Shut down the power */
8914                                                 bnx2x_set_power_state(bp,
8915                                                                 PCI_D3hot);
8916                                                 return;
8917                                         }
8918
8919                                         return;
8920                                 }
8921                         } else { /* non-leader */
8922                                 if (!bnx2x_reset_is_done(bp)) {
8923                                         /* Try to get a LEADER_LOCK HW lock,
8924                                          * since a former leader may have
8925                                          * been unloaded by the user or
8926                                          * released leadership for another
8927                                          * reason.
8928                                          */
8929                                         if (bnx2x_trylock_hw_lock(bp,
8930                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8931                                                 /* I'm a leader now! Restart a
8932                                                  * switch case.
8933                                                  */
8934                                                 bp->is_leader = 1;
8935                                                 break;
8936                                         }
8937
8938                                         schedule_delayed_work(&bp->reset_task,
8939                                                                 HZ/10);
8940                                         return;
8941
8942                                 } else { /* A leader has completed
8943                                           * the "process kill". It's an exit
8944                                           * point for a non-leader.
8945                                           */
8946                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8947                                         bp->recovery_state =
8948                                                 BNX2X_RECOVERY_DONE;
8949                                         smp_wmb();
8950                                         return;
8951                                 }
8952                         }
8953                 default:
8954                         return;
8955                 }
8956         }
8957 }
8958
8959 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8960  * scheduled on the generic workqueue in order to prevent a deadlock.
8961  */
8962 static void bnx2x_reset_task(struct work_struct *work)
8963 {
8964         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8965
8966 #ifdef BNX2X_STOP_ON_ERROR
8967         BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,\n"
8968                   " so the reset was not done to allow a debug dump;\n"
8969                   " you will need to reboot when done\n");
8970         return;
8971 #endif
8972
8973         rtnl_lock();
8974
8975         if (!netif_running(bp->dev))
8976                 goto reset_task_exit;
8977
8978         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8979                 bnx2x_parity_recover(bp);
8980         else {
8981                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8982                 bnx2x_nic_load(bp, LOAD_NORMAL);
8983         }
8984
8985 reset_task_exit:
8986         rtnl_unlock();
8987 }
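
/*
 * A minimal usage sketch, assuming bp->reset_task was initialized
 * with INIT_DELAYED_WORK() elsewhere in this driver: a watchdog path
 * (e.g. a tx-timeout handler) would queue the task with
 *
 *      schedule_delayed_work(&bp->reset_task, 0);
 *
 * and bnx2x_reset_task() then runs under rtnl_lock as above.
 */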
8988
8989 /* end of nic load/unload */
8990
8991 /* ethtool_ops */
8992
8993 /*
8994  * Init service functions
8995  */
8996
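/*
 * Writing a function number into the PGL "pretend" register makes
 * subsequent GRC accesses by this function appear to come from that
 * function; bnx2x_undi_int_disable_e1h() below uses it to pretend to
 * be function 0 while disabling interrupts, then restores itself.
 */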
8997 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8998 {
8999         switch (func) {
9000         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9001         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9002         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9003         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9004         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9005         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9006         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9007         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9008         default:
9009                 BNX2X_ERR("Unsupported function index: %d\n", func);
9010                 return (u32)(-1);
9011         }
9012 }
9013
9014 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9015 {
9016         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9017
9018         /* Flush all outstanding writes */
9019         mmiowb();
9020
9021         /* Pretend to be function 0 */
9022         REG_WR(bp, reg, 0);
9023         /* Flush the GRC transaction (in the chip) */
9024         new_val = REG_RD(bp, reg);
9025         if (new_val != 0) {
9026                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9027                           new_val);
9028                 BUG();
9029         }
9030
9031         /* From now we are in the "like-E1" mode */
9032         bnx2x_int_disable(bp);
9033
9034         /* Flush all outstanding writes */
9035         mmiowb();
9036
9037         /* Restore the original function settings */
9038         REG_WR(bp, reg, orig_func);
9039         new_val = REG_RD(bp, reg);
9040         if (new_val != orig_func) {
9041                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9042                           orig_func, new_val);
9043                 BUG();
9044         }
9045 }
9046
9047 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9048 {
9049         if (CHIP_IS_E1H(bp))
9050                 bnx2x_undi_int_disable_e1h(bp, func);
9051         else
9052                 bnx2x_int_disable(bp);
9053 }
9054
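/*
 * If a pre-boot (UNDI) driver left the device initialized - detected
 * via the "unprepared" bit together with the 0x7 doorbell CID offset
 * that UNDI programs - unload it gracefully: request an unload via
 * the MCP (on both ports if needed), block incoming traffic, reset
 * the device while preserving the NIG port-swap straps, and finally
 * restore our own function number and fw sequence.
 */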
9055 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9056 {
9057         u32 val;
9058
9059         /* Check if there is any driver already loaded */
9060         val = REG_RD(bp, MISC_REG_UNPREPARED);
9061         if (val == 0x1) {
9062                 /* Check if it is the UNDI driver: the UNDI driver
9063                  * initializes the CID offset for the normal doorbell to 0x7
9064                  */
9065                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9066                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9067                 if (val == 0x7) {
9068                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9069                         /* save our func */
9070                         int func = BP_FUNC(bp);
9071                         u32 swap_en;
9072                         u32 swap_val;
9073
9074                         /* clear the UNDI indication */
9075                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9076
9077                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9078
9079                         /* try unload UNDI on port 0 */
9080                         bp->func = 0;
9081                         bp->fw_seq =
9082                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9083                                 DRV_MSG_SEQ_NUMBER_MASK);
9084                         reset_code = bnx2x_fw_command(bp, reset_code);
9085
9086                         /* if UNDI is loaded on the other port */
9087                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9088
9089                                 /* send "DONE" for previous unload */
9090                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9091
9092                                 /* unload UNDI on port 1 */
9093                                 bp->func = 1;
9094                                 bp->fw_seq =
9095                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9096                                         DRV_MSG_SEQ_NUMBER_MASK);
9097                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9098
9099                                 bnx2x_fw_command(bp, reset_code);
9100                         }
9101
9102                         /* now it's safe to release the lock */
9103                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9104
9105                         bnx2x_undi_int_disable(bp, func);
9106
9107                         /* close input traffic and wait for it */
9108                         /* Do not rcv packets to BRB */
9109                         REG_WR(bp,
9110                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9111                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9112                         /* Do not direct rcv packets that are not for MCP to
9113                          * the BRB */
9114                         REG_WR(bp,
9115                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9116                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9117                         /* clear AEU */
9118                         REG_WR(bp,
9119                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9120                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9121                         msleep(10);
9122
9123                         /* save NIG port swap info */
9124                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9125                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9126                         /* reset device */
9127                         REG_WR(bp,
9128                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9129                                0xd3ffffff);
9130                         REG_WR(bp,
9131                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9132                                0x1403);
9133                         /* take the NIG out of reset and restore swap values */
9134                         REG_WR(bp,
9135                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9136                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9137                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9138                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9139
9140                         /* send unload done to the MCP */
9141                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9142
9143                         /* restore our func and fw_seq */
9144                         bp->func = func;
9145                         bp->fw_seq =
9146                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9147                                 DRV_MSG_SEQ_NUMBER_MASK);
9148
9149                 } else
9150                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9151         }
9152 }
9153
9154 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9155 {
9156         u32 val, val2, val3, val4, id;
9157         u16 pmc;
9158
9159         /* Get the chip revision id and number. */
9160         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9161         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9162         id = ((val & 0xffff) << 16);
9163         val = REG_RD(bp, MISC_REG_CHIP_REV);
9164         id |= ((val & 0xf) << 12);
9165         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9166         id |= ((val & 0xff) << 4);
9167         val = REG_RD(bp, MISC_REG_BOND_ID);
9168         id |= (val & 0xf);
9169         bp->common.chip_id = id;
9170         bp->link_params.chip_id = bp->common.chip_id;
9171         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9172
9173         val = (REG_RD(bp, 0x2874) & 0x55);
9174         if ((bp->common.chip_id & 0x1) ||
9175             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9176                 bp->flags |= ONE_PORT_FLAG;
9177                 BNX2X_DEV_INFO("single port device\n");
9178         }
9179
9180         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9181         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9182                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9183         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9184                        bp->common.flash_size, bp->common.flash_size);
9185
9186         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9187         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9188         bp->link_params.shmem_base = bp->common.shmem_base;
9189         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9190                        bp->common.shmem_base, bp->common.shmem2_base);
9191
9192         if (!bp->common.shmem_base ||
9193             (bp->common.shmem_base < 0xA0000) ||
9194             (bp->common.shmem_base >= 0xC0000)) {
9195                 BNX2X_DEV_INFO("MCP not active\n");
9196                 bp->flags |= NO_MCP_FLAG;
9197                 return;
9198         }
9199
9200         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9201         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9202                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9203                 BNX2X_ERROR("BAD MCP validity signature\n");
9204
9205         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9206         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9207
9208         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9209                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9210                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9211
9212         bp->link_params.feature_config_flags = 0;
9213         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9214         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9215                 bp->link_params.feature_config_flags |=
9216                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9217         else
9218                 bp->link_params.feature_config_flags &=
9219                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9220
9221         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9222         bp->common.bc_ver = val;
9223         BNX2X_DEV_INFO("bc_ver %X\n", val);
9224         if (val < BNX2X_BC_VER) {
9225                 /* for now only warn;
9226                  * later we might need to enforce this */
9227                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9228                             "please upgrade BC\n", BNX2X_BC_VER, val);
9229         }
9230         bp->link_params.feature_config_flags |=
9231                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9232                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9233
9234         if (BP_E1HVN(bp) == 0) {
9235                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9236                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9237         } else {
9238                 /* no WOL capability for E1HVN != 0 */
9239                 bp->flags |= NO_WOL_FLAG;
9240         }
9241         BNX2X_DEV_INFO("%sWoL capable\n",
9242                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9243
9244         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9245         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9246         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9247         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9248
9249         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9250                  val, val2, val3, val4);
9251 }
9252
9253 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9254                                                     u32 switch_cfg)
9255 {
9256         int port = BP_PORT(bp);
9257         u32 ext_phy_type;
9258
9259         switch (switch_cfg) {
9260         case SWITCH_CFG_1G:
9261                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9262
9263                 ext_phy_type =
9264                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9265                 switch (ext_phy_type) {
9266                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9267                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9268                                        ext_phy_type);
9269
9270                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9271                                                SUPPORTED_10baseT_Full |
9272                                                SUPPORTED_100baseT_Half |
9273                                                SUPPORTED_100baseT_Full |
9274                                                SUPPORTED_1000baseT_Full |
9275                                                SUPPORTED_2500baseX_Full |
9276                                                SUPPORTED_TP |
9277                                                SUPPORTED_FIBRE |
9278                                                SUPPORTED_Autoneg |
9279                                                SUPPORTED_Pause |
9280                                                SUPPORTED_Asym_Pause);
9281                         break;
9282
9283                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9284                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9285                                        ext_phy_type);
9286
9287                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9288                                                SUPPORTED_10baseT_Full |
9289                                                SUPPORTED_100baseT_Half |
9290                                                SUPPORTED_100baseT_Full |
9291                                                SUPPORTED_1000baseT_Full |
9292                                                SUPPORTED_TP |
9293                                                SUPPORTED_FIBRE |
9294                                                SUPPORTED_Autoneg |
9295                                                SUPPORTED_Pause |
9296                                                SUPPORTED_Asym_Pause);
9297                         break;
9298
9299                 default:
9300                         BNX2X_ERR("NVRAM config error. "
9301                                   "BAD SerDes ext_phy_config 0x%x\n",
9302                                   bp->link_params.ext_phy_config);
9303                         return;
9304                 }
9305
9306                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9307                                            port*0x10);
9308                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9309                 break;
9310
9311         case SWITCH_CFG_10G:
9312                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9313
9314                 ext_phy_type =
9315                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9316                 switch (ext_phy_type) {
9317                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9318                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9319                                        ext_phy_type);
9320
9321                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9322                                                SUPPORTED_10baseT_Full |
9323                                                SUPPORTED_100baseT_Half |
9324                                                SUPPORTED_100baseT_Full |
9325                                                SUPPORTED_1000baseT_Full |
9326                                                SUPPORTED_2500baseX_Full |
9327                                                SUPPORTED_10000baseT_Full |
9328                                                SUPPORTED_TP |
9329                                                SUPPORTED_FIBRE |
9330                                                SUPPORTED_Autoneg |
9331                                                SUPPORTED_Pause |
9332                                                SUPPORTED_Asym_Pause);
9333                         break;
9334
9335                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9336                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9337                                        ext_phy_type);
9338
9339                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9340                                                SUPPORTED_1000baseT_Full |
9341                                                SUPPORTED_FIBRE |
9342                                                SUPPORTED_Autoneg |
9343                                                SUPPORTED_Pause |
9344                                                SUPPORTED_Asym_Pause);
9345                         break;
9346
9347                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9348                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9349                                        ext_phy_type);
9350
9351                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9352                                                SUPPORTED_2500baseX_Full |
9353                                                SUPPORTED_1000baseT_Full |
9354                                                SUPPORTED_FIBRE |
9355                                                SUPPORTED_Autoneg |
9356                                                SUPPORTED_Pause |
9357                                                SUPPORTED_Asym_Pause);
9358                         break;
9359
9360                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9361                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9362                                        ext_phy_type);
9363
9364                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9365                                                SUPPORTED_FIBRE |
9366                                                SUPPORTED_Pause |
9367                                                SUPPORTED_Asym_Pause);
9368                         break;
9369
9370                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9371                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9372                                        ext_phy_type);
9373
9374                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9375                                                SUPPORTED_1000baseT_Full |
9376                                                SUPPORTED_FIBRE |
9377                                                SUPPORTED_Pause |
9378                                                SUPPORTED_Asym_Pause);
9379                         break;
9380
9381                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9382                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9383                                        ext_phy_type);
9384
9385                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9386                                                SUPPORTED_1000baseT_Full |
9387                                                SUPPORTED_Autoneg |
9388                                                SUPPORTED_FIBRE |
9389                                                SUPPORTED_Pause |
9390                                                SUPPORTED_Asym_Pause);
9391                         break;
9392
9393                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9394                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9395                                        ext_phy_type);
9396
9397                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9398                                                SUPPORTED_1000baseT_Full |
9399                                                SUPPORTED_Autoneg |
9400                                                SUPPORTED_FIBRE |
9401                                                SUPPORTED_Pause |
9402                                                SUPPORTED_Asym_Pause);
9403                         break;
9404
9405                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9406                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9407                                        ext_phy_type);
9408
9409                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9410                                                SUPPORTED_TP |
9411                                                SUPPORTED_Autoneg |
9412                                                SUPPORTED_Pause |
9413                                                SUPPORTED_Asym_Pause);
9414                         break;
9415
9416                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9417                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9418                                        ext_phy_type);
9419
9420                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9421                                                SUPPORTED_10baseT_Full |
9422                                                SUPPORTED_100baseT_Half |
9423                                                SUPPORTED_100baseT_Full |
9424                                                SUPPORTED_1000baseT_Full |
9425                                                SUPPORTED_10000baseT_Full |
9426                                                SUPPORTED_TP |
9427                                                SUPPORTED_Autoneg |
9428                                                SUPPORTED_Pause |
9429                                                SUPPORTED_Asym_Pause);
9430                         break;
9431
9432                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9433                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9434                                   bp->link_params.ext_phy_config);
9435                         break;
9436
9437                 default:
9438                         BNX2X_ERR("NVRAM config error. "
9439                                   "BAD XGXS ext_phy_config 0x%x\n",
9440                                   bp->link_params.ext_phy_config);
9441                         return;
9442                 }
9443
9444                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9445                                            port*0x18);
9446                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9447
9448                 break;
9449
9450         default:
9451                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9452                           bp->port.link_config);
9453                 return;
9454         }
9455         bp->link_params.phy_addr = bp->port.phy_addr;
9456
9457         /* mask what we support according to speed_cap_mask */
9458         if (!(bp->link_params.speed_cap_mask &
9459                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9460                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9461
9462         if (!(bp->link_params.speed_cap_mask &
9463                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9464                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9465
9466         if (!(bp->link_params.speed_cap_mask &
9467                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9468                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9469
9470         if (!(bp->link_params.speed_cap_mask &
9471                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9472                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9473
9474         if (!(bp->link_params.speed_cap_mask &
9475                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9476                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9477                                         SUPPORTED_1000baseT_Full);
9478
9479         if (!(bp->link_params.speed_cap_mask &
9480                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9481                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9482
9483         if (!(bp->link_params.speed_cap_mask &
9484                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9485                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9486
9487         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9488 }
9489
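/*
 * Editor's illustrative sketch (compiled out, not driver code): how the
 * speed_cap_mask pruning above behaves. Bits absent from the NVRAM mask
 * are cleared from the ethtool "supported" bitmap, so a PHY that could
 * do a speed is still not offered it if the board configuration says no.
 */
#if 0
static u32 example_prune_supported(u32 supported, u32 speed_cap_mask)
{
        if (!(speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
                supported &= ~(SUPPORTED_1000baseT_Half |
                               SUPPORTED_1000baseT_Full);
        if (!(speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
                supported &= ~SUPPORTED_10000baseT_Full;
        return supported;
}
#endif
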
9490 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9491 {
9492         bp->link_params.req_duplex = DUPLEX_FULL;
9493
9494         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9495         case PORT_FEATURE_LINK_SPEED_AUTO:
9496                 if (bp->port.supported & SUPPORTED_Autoneg) {
9497                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9498                         bp->port.advertising = bp->port.supported;
9499                 } else {
9500                         u32 ext_phy_type =
9501                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9502
9503                         if ((ext_phy_type ==
9504                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9505                             (ext_phy_type ==
9506                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9507                                 /* force 10G, no AN */
9508                                 bp->link_params.req_line_speed = SPEED_10000;
9509                                 bp->port.advertising =
9510                                                 (ADVERTISED_10000baseT_Full |
9511                                                  ADVERTISED_FIBRE);
9512                                 break;
9513                         }
9514                         BNX2X_ERR("NVRAM config error. "
9515                                   "Invalid link_config 0x%x"
9516                                   "  Autoneg not supported\n",
9517                                   bp->port.link_config);
9518                         return;
9519                 }
9520                 break;
9521
9522         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9523                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9524                         bp->link_params.req_line_speed = SPEED_10;
9525                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9526                                                 ADVERTISED_TP);
9527                 } else {
9528                         BNX2X_ERROR("NVRAM config error. "
9529                                     "Invalid link_config 0x%x"
9530                                     "  speed_cap_mask 0x%x\n",
9531                                     bp->port.link_config,
9532                                     bp->link_params.speed_cap_mask);
9533                         return;
9534                 }
9535                 break;
9536
9537         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9538                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9539                         bp->link_params.req_line_speed = SPEED_10;
9540                         bp->link_params.req_duplex = DUPLEX_HALF;
9541                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9542                                                 ADVERTISED_TP);
9543                 } else {
9544                         BNX2X_ERROR("NVRAM config error. "
9545                                     "Invalid link_config 0x%x"
9546                                     "  speed_cap_mask 0x%x\n",
9547                                     bp->port.link_config,
9548                                     bp->link_params.speed_cap_mask);
9549                         return;
9550                 }
9551                 break;
9552
9553         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9554                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9555                         bp->link_params.req_line_speed = SPEED_100;
9556                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9557                                                 ADVERTISED_TP);
9558                 } else {
9559                         BNX2X_ERROR("NVRAM config error. "
9560                                     "Invalid link_config 0x%x"
9561                                     "  speed_cap_mask 0x%x\n",
9562                                     bp->port.link_config,
9563                                     bp->link_params.speed_cap_mask);
9564                         return;
9565                 }
9566                 break;
9567
9568         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9569                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9570                         bp->link_params.req_line_speed = SPEED_100;
9571                         bp->link_params.req_duplex = DUPLEX_HALF;
9572                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9573                                                 ADVERTISED_TP);
9574                 } else {
9575                         BNX2X_ERROR("NVRAM config error. "
9576                                     "Invalid link_config 0x%x"
9577                                     "  speed_cap_mask 0x%x\n",
9578                                     bp->port.link_config,
9579                                     bp->link_params.speed_cap_mask);
9580                         return;
9581                 }
9582                 break;
9583
9584         case PORT_FEATURE_LINK_SPEED_1G:
9585                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9586                         bp->link_params.req_line_speed = SPEED_1000;
9587                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9588                                                 ADVERTISED_TP);
9589                 } else {
9590                         BNX2X_ERROR("NVRAM config error. "
9591                                     "Invalid link_config 0x%x"
9592                                     "  speed_cap_mask 0x%x\n",
9593                                     bp->port.link_config,
9594                                     bp->link_params.speed_cap_mask);
9595                         return;
9596                 }
9597                 break;
9598
9599         case PORT_FEATURE_LINK_SPEED_2_5G:
9600                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9601                         bp->link_params.req_line_speed = SPEED_2500;
9602                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9603                                                 ADVERTISED_TP);
9604                 } else {
9605                         BNX2X_ERROR("NVRAM config error. "
9606                                     "Invalid link_config 0x%x"
9607                                     "  speed_cap_mask 0x%x\n",
9608                                     bp->port.link_config,
9609                                     bp->link_params.speed_cap_mask);
9610                         return;
9611                 }
9612                 break;
9613
9614         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9615         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9616         case PORT_FEATURE_LINK_SPEED_10G_KR:
9617                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9618                         bp->link_params.req_line_speed = SPEED_10000;
9619                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9620                                                 ADVERTISED_FIBRE);
9621                 } else {
9622                         BNX2X_ERROR("NVRAM config error. "
9623                                     "Invalid link_config 0x%x"
9624                                     "  speed_cap_mask 0x%x\n",
9625                                     bp->port.link_config,
9626                                     bp->link_params.speed_cap_mask);
9627                         return;
9628                 }
9629                 break;
9630
9631         default:
9632                 BNX2X_ERROR("NVRAM config error. "
9633                             "BAD link speed link_config 0x%x\n",
9634                             bp->port.link_config);
9635                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9636                 bp->port.advertising = bp->port.supported;
9637                 break;
9638         }
9639
9640         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9641                                          PORT_FEATURE_FLOW_CONTROL_MASK);
9642         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9643             !(bp->port.supported & SUPPORTED_Autoneg))
9644                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9645
9646         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9647                        "  advertising 0x%x\n",
9648                        bp->link_params.req_line_speed,
9649                        bp->link_params.req_duplex,
9650                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9651 }
9652
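/*
 * Editor's note (compiled out sketch): the flow-control fixup at the end
 * of bnx2x_link_settings_requested() demotes an AUTO request to NONE when
 * the port cannot autonegotiate, since pause autoneg cannot complete
 * without AN. The helper below is hypothetical.
 */
#if 0
static u32 example_fixup_flow_ctrl(u32 req_fc, u32 supported)
{
        if ((req_fc == BNX2X_FLOW_CTRL_AUTO) &&
            !(supported & SUPPORTED_Autoneg))
                return BNX2X_FLOW_CTRL_NONE;
        return req_fc;
}
#endif
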
9653 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9654 {
9655         mac_hi = cpu_to_be16(mac_hi);
9656         mac_lo = cpu_to_be32(mac_lo);
9657         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9658         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9659 }
9660
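/*
 * Editor's worked example (compiled out): for mac_hi = 0x0050 and
 * mac_lo = 0xc22d1234, bnx2x_set_mac_buf() above emits the big-endian
 * byte stream 00:50:c2:2d:12:34: mac_buf[0..1] hold the upper 16 bits,
 * mac_buf[2..5] the lower 32 bits. The values are made up for
 * illustration.
 */
#if 0
static void example_mac_layout(void)
{
        u8 mac[ETH_ALEN];

        bnx2x_set_mac_buf(mac, 0xc22d1234, 0x0050);
        /* mac[] now holds { 0x00, 0x50, 0xc2, 0x2d, 0x12, 0x34 } */
}
#endif
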
9661 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9662 {
9663         int port = BP_PORT(bp);
9664         u32 val, val2;
9665         u32 config;
9666         u16 i;
9667         u32 ext_phy_type;
9668
9669         bp->link_params.bp = bp;
9670         bp->link_params.port = port;
9671
9672         bp->link_params.lane_config =
9673                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9674         bp->link_params.ext_phy_config =
9675                 SHMEM_RD(bp,
9676                          dev_info.port_hw_config[port].external_phy_config);
9677         /* BCM8727_NOC => BCM8727, no over-current */
9678         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9679             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9680                 bp->link_params.ext_phy_config &=
9681                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9682                 bp->link_params.ext_phy_config |=
9683                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9684                 bp->link_params.feature_config_flags |=
9685                         FEATURE_CONFIG_BCM8727_NOC;
9686         }
9687
9688         bp->link_params.speed_cap_mask =
9689                 SHMEM_RD(bp,
9690                          dev_info.port_hw_config[port].speed_capability_mask);
9691
9692         bp->port.link_config =
9693                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9694
9695         /* Get the XGXS RX and TX config for all 4 lanes */
9696         for (i = 0; i < 2; i++) {
9697                 val = SHMEM_RD(bp,
9698                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9699                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9700                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9701
9702                 val = SHMEM_RD(bp,
9703                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9704                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9705                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9706         }
9707
9708         /* If the device is capable of WoL, set the default state according
9709          * to the HW
9710          */
9711         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9712         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9713                    (config & PORT_FEATURE_WOL_ENABLED));
9714
9715         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9716                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9717                        bp->link_params.lane_config,
9718                        bp->link_params.ext_phy_config,
9719                        bp->link_params.speed_cap_mask, bp->port.link_config);
9720
9721         bp->link_params.switch_cfg |= (bp->port.link_config &
9722                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9723         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9724
9725         bnx2x_link_settings_requested(bp);
9726
9727         /*
9728          * If connected directly, work with the internal PHY, otherwise, work
9729          * with the external PHY
9730          */
9731         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9732         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9733                 bp->mdio.prtad = bp->link_params.phy_addr;
9734
9735         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9736                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9737                 bp->mdio.prtad =
9738                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9739
9740         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9741         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9742         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9743         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9744         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9745
9746 #ifdef BCM_CNIC
9747         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9748         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9749         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9750 #endif
9751 }
9752
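/*
 * Editor's note (compiled out sketch): each xgxs_config_{rx,tx} shmem
 * word read in bnx2x_get_port_hwinfo() packs two 16-bit lane settings;
 * the loop above splits every word into two consecutive array entries.
 */
#if 0
static void example_lane_unpack(u16 *lanes, u32 shmem_word, int i)
{
        /* for i = 0: lanes[0] gets the high half, lanes[1] the low half */
        lanes[i << 1]       = (shmem_word >> 16) & 0xffff;
        lanes[(i << 1) + 1] = shmem_word & 0xffff;
}
#endif
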
9753 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9754 {
9755         int func = BP_FUNC(bp);
9756         u32 val, val2;
9757         int rc = 0;
9758
9759         bnx2x_get_common_hwinfo(bp);
9760
9761         bp->e1hov = 0;
9762         bp->e1hmf = 0;
9763         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9764                 bp->mf_config =
9765                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9766
9767                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9768                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9769                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9770                         bp->e1hmf = 1;
9771                 BNX2X_DEV_INFO("%s function mode\n",
9772                                IS_E1HMF(bp) ? "multi" : "single");
9773
9774                 if (IS_E1HMF(bp)) {
9775                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9776                                                                 e1hov_tag) &
9777                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9778                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9779                                 bp->e1hov = val;
9780                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9781                                                "(0x%04x)\n",
9782                                                func, bp->e1hov, bp->e1hov);
9783                         } else {
9784                                 BNX2X_ERROR("No valid E1HOV for func %d,"
9785                                             "  aborting\n", func);
9786                                 rc = -EPERM;
9787                         }
9788                 } else {
9789                         if (BP_E1HVN(bp)) {
9790                                 BNX2X_ERROR("VN %d in single function mode,"
9791                                             "  aborting\n", BP_E1HVN(bp));
9792                                 rc = -EPERM;
9793                         }
9794                 }
9795         }
9796
9797         if (!BP_NOMCP(bp)) {
9798                 bnx2x_get_port_hwinfo(bp);
9799
9800                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9801                               DRV_MSG_SEQ_NUMBER_MASK);
9802                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9803         }
9804
9805         if (IS_E1HMF(bp)) {
9806                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9807                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9808                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9809                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9810                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9811                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9812                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9813                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9814                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9815                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9816                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9817                                ETH_ALEN);
9818                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9819                                ETH_ALEN);
9820                 }
9821
9822                 return rc;
9823         }
9824
9825         if (BP_NOMCP(bp)) {
9826                 /* only supposed to happen on emulation/FPGA */
9827                 BNX2X_ERROR("warning: random MAC workaround active\n");
9828                 random_ether_addr(bp->dev->dev_addr);
9829                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9830         }
9831
9832         return rc;
9833 }
9834
9835 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9836 {
9837         int cnt, i, block_end, rodi;
9838         char vpd_data[BNX2X_VPD_LEN+1];
9839         char str_id_reg[VENDOR_ID_LEN+1];
9840         char str_id_cap[VENDOR_ID_LEN+1];
9841         u8 len;
9842
9843         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9844         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9845
9846         if (cnt < BNX2X_VPD_LEN)
9847                 goto out_not_found;
9848
9849         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9850                              PCI_VPD_LRDT_RO_DATA);
9851         if (i < 0)
9852                 goto out_not_found;
9853
9855         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9856                     pci_vpd_lrdt_size(&vpd_data[i]);
9857
9858         i += PCI_VPD_LRDT_TAG_SIZE;
9859
9860         if (block_end > BNX2X_VPD_LEN)
9861                 goto out_not_found;
9862
9863         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9864                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9865         if (rodi < 0)
9866                 goto out_not_found;
9867
9868         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9869
9870         if (len != VENDOR_ID_LEN)
9871                 goto out_not_found;
9872
9873         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9874
9875         /* vendor-specific info */
9876         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9877         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9878         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9879             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9880
9881                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9882                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9883                 if (rodi >= 0) {
9884                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9885
9886                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9887
9888                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9889                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9890                                 bp->fw_ver[len] = ' ';
9891                         }
9892                 }
9893                 return;
9894         }
9895 out_not_found:
9896         return;
9897 }
9898
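/*
 * Editor's sketch (compiled out) of the VPD walk in bnx2x_read_fwinfo():
 * locate the read-only LRDT tag, bound the section, then search for a
 * keyword inside it. It uses the same pci_vpd_* helpers as the driver;
 * the function itself is hypothetical.
 */
#if 0
static int example_vpd_keyword(char *vpd, int len, const char *kw,
                               u8 *field_len)
{
        int i, block_end, rodi;

        i = pci_vpd_find_tag(vpd, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                return -ENOENT;

        block_end = i + PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&vpd[i]);
        i += PCI_VPD_LRDT_TAG_SIZE;

        rodi = pci_vpd_find_info_keyword(vpd, i, block_end, kw);
        if (rodi < 0)
                return -ENOENT;

        *field_len = pci_vpd_info_field_size(&vpd[rodi]);
        return rodi + PCI_VPD_INFO_FLD_HDR_SIZE;        /* data offset */
}
#endif
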
9899 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9900 {
9901         int func = BP_FUNC(bp);
9902         int timer_interval;
9903         int rc;
9904
9905         /* Disable interrupt handling until HW is initialized */
9906         atomic_set(&bp->intr_sem, 1);
9907         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9908
9909         mutex_init(&bp->port.phy_mutex);
9910         mutex_init(&bp->fw_mb_mutex);
9911 #ifdef BCM_CNIC
9912         mutex_init(&bp->cnic_mutex);
9913 #endif
9914
9915         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9916         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9917
9918         rc = bnx2x_get_hwinfo(bp);
9919
9920         bnx2x_read_fwinfo(bp);
9921         /* need to reset the chip if UNDI was active */
9922         if (!BP_NOMCP(bp))
9923                 bnx2x_undi_unload(bp);
9924
9925         if (CHIP_REV_IS_FPGA(bp))
9926                 dev_err(&bp->pdev->dev, "FPGA detected\n");
9927
9928         if (BP_NOMCP(bp) && (func == 0))
9929                 dev_err(&bp->pdev->dev, "MCP disabled, "
9930                                         "must load devices in order!\n");
9931
9932         /* Set multi-queue mode */
9933         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9934             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9935                 dev_err(&bp->pdev->dev, "Multi-queue mode disabled: "
9936                                         "the requested int_mode is not MSI-X\n");
9937                 multi_mode = ETH_RSS_MODE_DISABLED;
9938         }
9939         bp->multi_mode = multi_mode;
9940
9942         bp->dev->features |= NETIF_F_GRO;
9943
9944         /* Set TPA flags */
9945         if (disable_tpa) {
9946                 bp->flags &= ~TPA_ENABLE_FLAG;
9947                 bp->dev->features &= ~NETIF_F_LRO;
9948         } else {
9949                 bp->flags |= TPA_ENABLE_FLAG;
9950                 bp->dev->features |= NETIF_F_LRO;
9951         }
9952
9953         if (CHIP_IS_E1(bp))
9954                 bp->dropless_fc = 0;
9955         else
9956                 bp->dropless_fc = dropless_fc;
9957
9958         bp->mrrs = mrrs;
9959
9960         bp->tx_ring_size = MAX_TX_AVAIL;
9961         bp->rx_ring_size = MAX_RX_AVAIL;
9962
9963         bp->rx_csum = 1;
9964
9965         /* round the coalescing timeouts down to the BTR granularity */
9966         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9967         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9968
9969         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9970         bp->current_interval = (poll ? poll : timer_interval);
9971
9972         init_timer(&bp->timer);
9973         bp->timer.expires = jiffies + bp->current_interval;
9974         bp->timer.data = (unsigned long) bp;
9975         bp->timer.function = bnx2x_timer;
9976
9977         return rc;
9978 }
9979
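/*
 * Editor's worked example (compiled out): the tick setup in
 * bnx2x_init_bp() rounds down to a multiple of 4 * BNX2X_BTR. If
 * BNX2X_BTR were 2 (a value assumed purely for illustration), the
 * granularity is 8, so the 50us TX target becomes (50 / 8) * 8 = 48
 * and the 25us RX target (25 / 8) * 8 = 24.
 */
#if 0
static u16 example_round_ticks(u16 usecs)
{
        return (usecs / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
}
#endif
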
9980 /*
9981  * ethtool service functions
9982  */
9983
9984 /* All ethtool functions called with rtnl_lock */
9985
9986 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9987 {
9988         struct bnx2x *bp = netdev_priv(dev);
9989
9990         cmd->supported = bp->port.supported;
9991         cmd->advertising = bp->port.advertising;
9992
9993         if ((bp->state == BNX2X_STATE_OPEN) &&
9994             !(bp->flags & MF_FUNC_DIS) &&
9995             (bp->link_vars.link_up)) {
9996                 cmd->speed = bp->link_vars.line_speed;
9997                 cmd->duplex = bp->link_vars.duplex;
9998                 if (IS_E1HMF(bp)) {
9999                         u16 vn_max_rate;
10000
10001                         vn_max_rate =
10002                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10003                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10004                         if (vn_max_rate < cmd->speed)
10005                                 cmd->speed = vn_max_rate;
10006                 }
10007         } else {
10008                 cmd->speed = -1;
10009                 cmd->duplex = -1;
10010         }
10011
10012         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10013                 u32 ext_phy_type =
10014                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10015
10016                 switch (ext_phy_type) {
10017                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10018                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10019                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10020                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10021                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10022                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10023                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10024                         cmd->port = PORT_FIBRE;
10025                         break;
10026
10027                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10028                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10029                         cmd->port = PORT_TP;
10030                         break;
10031
10032                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10033                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10034                                   bp->link_params.ext_phy_config);
10035                         break;
10036
10037                 default:
10038                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10039                            bp->link_params.ext_phy_config);
10040                         break;
10041                 }
10042         } else
10043                 cmd->port = PORT_TP;
10044
10045         cmd->phy_address = bp->mdio.prtad;
10046         cmd->transceiver = XCVR_INTERNAL;
10047
10048         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10049                 cmd->autoneg = AUTONEG_ENABLE;
10050         else
10051                 cmd->autoneg = AUTONEG_DISABLE;
10052
10053         cmd->maxtxpkt = 0;
10054         cmd->maxrxpkt = 0;
10055
10056         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10057            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10058            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10059            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10060            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10061            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10062            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10063
10064         return 0;
10065 }
10066
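/*
 * Editor's worked example (compiled out): in multi-function mode
 * bnx2x_get_settings() clamps the reported speed to the per-VN maximum.
 * With a 10000 Mb/s link and the FUNC_MF_CFG_MAX_BW field holding 25
 * (units of 100 Mb/s), vn_max_rate = 25 * 100 = 2500, so ethtool sees
 * 2500 Mb/s. The numbers are made up for illustration.
 */
#if 0
static u16 example_mf_speed(u16 line_speed, u32 mf_config)
{
        u16 vn_max_rate = ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
                           FUNC_MF_CFG_MAX_BW_SHIFT) * 100;

        return (vn_max_rate < line_speed) ? vn_max_rate : line_speed;
}
#endif
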
10067 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10068 {
10069         struct bnx2x *bp = netdev_priv(dev);
10070         u32 advertising;
10071
10072         if (IS_E1HMF(bp))
10073                 return 0;
10074
10075         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10076            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10077            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10078            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10079            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10080            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10081            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10082
10083         if (cmd->autoneg == AUTONEG_ENABLE) {
10084                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10085                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10086                         return -EINVAL;
10087                 }
10088
10089                 /* advertise the requested speed and duplex if supported */
10090                 cmd->advertising &= bp->port.supported;
10091
10092                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10093                 bp->link_params.req_duplex = DUPLEX_FULL;
10094                 bp->port.advertising |= (ADVERTISED_Autoneg |
10095                                          cmd->advertising);
10096
10097         } else { /* forced speed */
10098                 /* advertise the requested speed and duplex if supported */
10099                 switch (cmd->speed) {
10100                 case SPEED_10:
10101                         if (cmd->duplex == DUPLEX_FULL) {
10102                                 if (!(bp->port.supported &
10103                                       SUPPORTED_10baseT_Full)) {
10104                                         DP(NETIF_MSG_LINK,
10105                                            "10M full not supported\n");
10106                                         return -EINVAL;
10107                                 }
10108
10109                                 advertising = (ADVERTISED_10baseT_Full |
10110                                                ADVERTISED_TP);
10111                         } else {
10112                                 if (!(bp->port.supported &
10113                                       SUPPORTED_10baseT_Half)) {
10114                                         DP(NETIF_MSG_LINK,
10115                                            "10M half not supported\n");
10116                                         return -EINVAL;
10117                                 }
10118
10119                                 advertising = (ADVERTISED_10baseT_Half |
10120                                                ADVERTISED_TP);
10121                         }
10122                         break;
10123
10124                 case SPEED_100:
10125                         if (cmd->duplex == DUPLEX_FULL) {
10126                                 if (!(bp->port.supported &
10127                                                 SUPPORTED_100baseT_Full)) {
10128                                         DP(NETIF_MSG_LINK,
10129                                            "100M full not supported\n");
10130                                         return -EINVAL;
10131                                 }
10132
10133                                 advertising = (ADVERTISED_100baseT_Full |
10134                                                ADVERTISED_TP);
10135                         } else {
10136                                 if (!(bp->port.supported &
10137                                                 SUPPORTED_100baseT_Half)) {
10138                                         DP(NETIF_MSG_LINK,
10139                                            "100M half not supported\n");
10140                                         return -EINVAL;
10141                                 }
10142
10143                                 advertising = (ADVERTISED_100baseT_Half |
10144                                                ADVERTISED_TP);
10145                         }
10146                         break;
10147
10148                 case SPEED_1000:
10149                         if (cmd->duplex != DUPLEX_FULL) {
10150                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10151                                 return -EINVAL;
10152                         }
10153
10154                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10155                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10156                                 return -EINVAL;
10157                         }
10158
10159                         advertising = (ADVERTISED_1000baseT_Full |
10160                                        ADVERTISED_TP);
10161                         break;
10162
10163                 case SPEED_2500:
10164                         if (cmd->duplex != DUPLEX_FULL) {
10165                                 DP(NETIF_MSG_LINK,
10166                                    "2.5G half not supported\n");
10167                                 return -EINVAL;
10168                         }
10169
10170                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10171                                 DP(NETIF_MSG_LINK,
10172                                    "2.5G full not supported\n");
10173                                 return -EINVAL;
10174                         }
10175
10176                         advertising = (ADVERTISED_2500baseX_Full |
10177                                        ADVERTISED_TP);
10178                         break;
10179
10180                 case SPEED_10000:
10181                         if (cmd->duplex != DUPLEX_FULL) {
10182                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10183                                 return -EINVAL;
10184                         }
10185
10186                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10187                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10188                                 return -EINVAL;
10189                         }
10190
10191                         advertising = (ADVERTISED_10000baseT_Full |
10192                                        ADVERTISED_FIBRE);
10193                         break;
10194
10195                 default:
10196                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10197                         return -EINVAL;
10198                 }
10199
10200                 bp->link_params.req_line_speed = cmd->speed;
10201                 bp->link_params.req_duplex = cmd->duplex;
10202                 bp->port.advertising = advertising;
10203         }
10204
10205         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10206            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10207            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10208            bp->port.advertising);
10209
10210         if (netif_running(dev)) {
10211                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10212                 bnx2x_link_set(bp);
10213         }
10214
10215         return 0;
10216 }
10217
10218 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10219 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10220
10221 static int bnx2x_get_regs_len(struct net_device *dev)
10222 {
10223         struct bnx2x *bp = netdev_priv(dev);
10224         int regdump_len = 0;
10225         int i;
10226
10227         if (CHIP_IS_E1(bp)) {
10228                 for (i = 0; i < REGS_COUNT; i++)
10229                         if (IS_E1_ONLINE(reg_addrs[i].info))
10230                                 regdump_len += reg_addrs[i].size;
10231
10232                 for (i = 0; i < WREGS_COUNT_E1; i++)
10233                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10234                                 regdump_len += wreg_addrs_e1[i].size *
10235                                         (1 + wreg_addrs_e1[i].read_regs_count);
10236
10237         } else { /* E1H */
10238                 for (i = 0; i < REGS_COUNT; i++)
10239                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10240                                 regdump_len += reg_addrs[i].size;
10241
10242                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10243                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10244                                 regdump_len += wreg_addrs_e1h[i].size *
10245                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10246         }
10247         regdump_len *= 4;
10248         regdump_len += sizeof(struct dump_hdr);
10249
10250         return regdump_len;
10251 }
10252
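/*
 * Editor's note (compiled out sketch): bnx2x_get_regs_len() counts
 * registers in 32-bit words and converts to bytes only at the end, so a
 * block of 16 online registers contributes 64 bytes to the final answer,
 * plus one struct dump_hdr for the whole dump.
 */
#if 0
static int example_regdump_bytes(int dword_count)
{
        return dword_count * 4 + sizeof(struct dump_hdr);
}
#endif
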
10253 static void bnx2x_get_regs(struct net_device *dev,
10254                            struct ethtool_regs *regs, void *_p)
10255 {
10256         u32 *p = _p, i, j;
10257         struct bnx2x *bp = netdev_priv(dev);
10258         struct dump_hdr dump_hdr = {0};
10259
10260         regs->version = 0;
10261         memset(p, 0, regs->len);
10262
10263         if (!netif_running(bp->dev))
10264                 return;
10265
10266         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10267         dump_hdr.dump_sign = dump_sign_all;
10268         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10269         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10270         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10271         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10272         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10273
10274         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10275         p += dump_hdr.hdr_size + 1;
10276
10277         if (CHIP_IS_E1(bp)) {
10278                 for (i = 0; i < REGS_COUNT; i++)
10279                         if (IS_E1_ONLINE(reg_addrs[i].info))
10280                                 for (j = 0; j < reg_addrs[i].size; j++)
10281                                         *p++ = REG_RD(bp,
10282                                                       reg_addrs[i].addr + j*4);
10283
10284         } else { /* E1H */
10285                 for (i = 0; i < REGS_COUNT; i++)
10286                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10287                                 for (j = 0; j < reg_addrs[i].size; j++)
10288                                         *p++ = REG_RD(bp,
10289                                                       reg_addrs[i].addr + j*4);
10290         }
10291 }
10292
10293 #define PHY_FW_VER_LEN                  10
10294
10295 static void bnx2x_get_drvinfo(struct net_device *dev,
10296                               struct ethtool_drvinfo *info)
10297 {
10298         struct bnx2x *bp = netdev_priv(dev);
10299         u8 phy_fw_ver[PHY_FW_VER_LEN];
10300
10301         strcpy(info->driver, DRV_MODULE_NAME);
10302         strcpy(info->version, DRV_MODULE_VERSION);
10303
10304         phy_fw_ver[0] = '\0';
10305         if (bp->port.pmf) {
10306                 bnx2x_acquire_phy_lock(bp);
10307                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10308                                              (bp->state != BNX2X_STATE_CLOSED),
10309                                              phy_fw_ver, PHY_FW_VER_LEN);
10310                 bnx2x_release_phy_lock(bp);
10311         }
10312
10313         strncpy(info->fw_version, bp->fw_ver, 32);
10314         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10315                  "bc %d.%d.%d%s%s",
10316                  (bp->common.bc_ver & 0xff0000) >> 16,
10317                  (bp->common.bc_ver & 0xff00) >> 8,
10318                  (bp->common.bc_ver & 0xff),
10319                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10320         strcpy(info->bus_info, pci_name(bp->pdev));
10321         info->n_stats = BNX2X_NUM_STATS;
10322         info->testinfo_len = BNX2X_NUM_TESTS;
10323         info->eedump_len = bp->common.flash_size;
10324         info->regdump_len = bnx2x_get_regs_len(dev);
10325 }
10326
10327 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10328 {
10329         struct bnx2x *bp = netdev_priv(dev);
10330
10331         if (bp->flags & NO_WOL_FLAG) {
10332                 wol->supported = 0;
10333                 wol->wolopts = 0;
10334         } else {
10335                 wol->supported = WAKE_MAGIC;
10336                 if (bp->wol)
10337                         wol->wolopts = WAKE_MAGIC;
10338                 else
10339                         wol->wolopts = 0;
10340         }
10341         memset(&wol->sopass, 0, sizeof(wol->sopass));
10342 }
10343
10344 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10345 {
10346         struct bnx2x *bp = netdev_priv(dev);
10347
10348         if (wol->wolopts & ~WAKE_MAGIC)
10349                 return -EINVAL;
10350
10351         if (wol->wolopts & WAKE_MAGIC) {
10352                 if (bp->flags & NO_WOL_FLAG)
10353                         return -EINVAL;
10354
10355                 bp->wol = 1;
10356         } else
10357                 bp->wol = 0;
10358
10359         return 0;
10360 }
10361
10362 static u32 bnx2x_get_msglevel(struct net_device *dev)
10363 {
10364         struct bnx2x *bp = netdev_priv(dev);
10365
10366         return bp->msg_enable;
10367 }
10368
10369 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10370 {
10371         struct bnx2x *bp = netdev_priv(dev);
10372
10373         if (capable(CAP_NET_ADMIN))
10374                 bp->msg_enable = level;
10375 }
10376
10377 static int bnx2x_nway_reset(struct net_device *dev)
10378 {
10379         struct bnx2x *bp = netdev_priv(dev);
10380
10381         if (!bp->port.pmf)
10382                 return 0;
10383
10384         if (netif_running(dev)) {
10385                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10386                 bnx2x_link_set(bp);
10387         }
10388
10389         return 0;
10390 }
10391
10392 static u32 bnx2x_get_link(struct net_device *dev)
10393 {
10394         struct bnx2x *bp = netdev_priv(dev);
10395
10396         if (bp->flags & MF_FUNC_DIS)
10397                 return 0;
10398
10399         return bp->link_vars.link_up;
10400 }
10401
10402 static int bnx2x_get_eeprom_len(struct net_device *dev)
10403 {
10404         struct bnx2x *bp = netdev_priv(dev);
10405
10406         return bp->common.flash_size;
10407 }
10408
10409 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10410 {
10411         int port = BP_PORT(bp);
10412         int count, i;
10413         u32 val = 0;
10414
10415         /* adjust timeout for emulation/FPGA */
10416         count = NVRAM_TIMEOUT_COUNT;
10417         if (CHIP_REV_IS_SLOW(bp))
10418                 count *= 100;
10419
10420         /* request access to nvram interface */
10421         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10422                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10423
10424         for (i = 0; i < count*10; i++) {
10425                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10426                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10427                         break;
10428
10429                 udelay(5);
10430         }
10431
10432         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10433                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10434                 return -EBUSY;
10435         }
10436
10437         return 0;
10438 }
10439
10440 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10441 {
10442         int port = BP_PORT(bp);
10443         int count, i;
10444         u32 val = 0;
10445
10446         /* adjust timeout for emulation/FPGA */
10447         count = NVRAM_TIMEOUT_COUNT;
10448         if (CHIP_REV_IS_SLOW(bp))
10449                 count *= 100;
10450
10451         /* relinquish nvram interface */
10452         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10453                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10454
10455         for (i = 0; i < count*10; i++) {
10456                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10457                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10458                         break;
10459
10460                 udelay(5);
10461         }
10462
10463         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10464                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10465                 return -EBUSY;
10466         }
10467
10468         return 0;
10469 }
10470
10471 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10472 {
10473         u32 val;
10474
10475         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10476
10477         /* enable both bits, even on read */
10478         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10479                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10480                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10481 }
10482
10483 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10484 {
10485         u32 val;
10486
10487         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10488
10489         /* disable both bits, even after read */
10490         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10491                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10492                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10493 }
10494
10495 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10496                                   u32 cmd_flags)
10497 {
10498         int count, i, rc;
10499         u32 val;
10500
10501         /* build the command word */
10502         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10503
10504         /* need to clear DONE bit separately */
10505         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10506
10507         /* address of the NVRAM to read from */
10508         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10509                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10510
10511         /* issue a read command */
10512         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10513
10514         /* adjust timeout for emulation/FPGA */
10515         count = NVRAM_TIMEOUT_COUNT;
10516         if (CHIP_REV_IS_SLOW(bp))
10517                 count *= 100;
10518
10519         /* wait for completion */
10520         *ret_val = 0;
10521         rc = -EBUSY;
10522         for (i = 0; i < count; i++) {
10523                 udelay(5);
10524                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10525
10526                 if (val & MCPR_NVM_COMMAND_DONE) {
10527                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10528                         /* we read NVRAM data in CPU order,
10529                          * but ethtool sees it as an array of bytes;
10530                          * converting to big-endian does the job */
10531                         *ret_val = cpu_to_be32(val);
10532                         rc = 0;
10533                         break;
10534                 }
10535         }
10536
10537         return rc;
10538 }
10539
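/*
 * Editor's sketch (compiled out) of the NVRAM command handshake used by
 * bnx2x_nvram_read_dword() above: clear DONE, program the address, issue
 * DOIT, then poll DONE with a 5us delay per try, up to
 * NVRAM_TIMEOUT_COUNT tries (x100 on emulation/FPGA). The helper is
 * hypothetical.
 */
#if 0
static bool example_poll_nvram_done(struct bnx2x *bp, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                udelay(5);
                if (REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND) &
                    MCPR_NVM_COMMAND_DONE)
                        return true;
        }
        return false;
}
#endif
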
10540 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10541                             int buf_size)
10542 {
10543         int rc;
10544         u32 cmd_flags;
10545         __be32 val;
10546
10547         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10548                 DP(BNX2X_MSG_NVM,
10549                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10550                    offset, buf_size);
10551                 return -EINVAL;
10552         }
10553
10554         if (offset + buf_size > bp->common.flash_size) {
10555                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10556                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10557                    offset, buf_size, bp->common.flash_size);
10558                 return -EINVAL;
10559         }
10560
10561         /* request access to nvram interface */
10562         rc = bnx2x_acquire_nvram_lock(bp);
10563         if (rc)
10564                 return rc;
10565
10566         /* enable access to nvram interface */
10567         bnx2x_enable_nvram_access(bp);
10568
10569         /* read the first word(s) */
10570         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10571         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10572                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10573                 memcpy(ret_buf, &val, 4);
10574
10575                 /* advance to the next dword */
10576                 offset += sizeof(u32);
10577                 ret_buf += sizeof(u32);
10578                 buf_size -= sizeof(u32);
10579                 cmd_flags = 0;
10580         }
10581
10582         if (rc == 0) {
10583                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10584                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10585                 memcpy(ret_buf, &val, 4);
10586         }
10587
10588         /* disable access to nvram interface */
10589         bnx2x_disable_nvram_access(bp);
10590         bnx2x_release_nvram_lock(bp);
10591
10592         return rc;
10593 }
10594
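/*
 * Editor's note (compiled out sketch): bnx2x_nvram_read() brackets a
 * multi-dword transfer with MCPR_NVM_COMMAND_FIRST on the first dword
 * and MCPR_NVM_COMMAND_LAST on the final one; a 12-byte read issues
 * three dwords with flags FIRST, 0, LAST. A single-dword transfer gets
 * both flags, as in bnx2x_nvram_write1() below.
 */
#if 0
static u32 example_chunk_flags(int idx, int total_dwords)
{
        u32 flags = 0;

        if (idx == 0)
                flags |= MCPR_NVM_COMMAND_FIRST;
        if (idx == total_dwords - 1)
                flags |= MCPR_NVM_COMMAND_LAST;
        return flags;
}
#endif
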
10595 static int bnx2x_get_eeprom(struct net_device *dev,
10596                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10597 {
10598         struct bnx2x *bp = netdev_priv(dev);
10599         int rc;
10600
10601         if (!netif_running(dev))
10602                 return -EAGAIN;
10603
10604         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10605            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10606            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10607            eeprom->len, eeprom->len);
10608
10609         /* parameters already validated in ethtool_get_eeprom */
10610
10611         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10612
10613         return rc;
10614 }
10615
10616 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10617                                    u32 cmd_flags)
10618 {
10619         int count, i, rc;
10620
10621         /* build the command word */
10622         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10623
10624         /* need to clear DONE bit separately */
10625         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10626
10627         /* write the data */
10628         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10629
10630         /* address of the NVRAM to write to */
10631         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10632                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10633
10634         /* issue the write command */
10635         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10636
10637         /* adjust timeout for emulation/FPGA */
10638         count = NVRAM_TIMEOUT_COUNT;
10639         if (CHIP_REV_IS_SLOW(bp))
10640                 count *= 100;
10641
10642         /* wait for completion */
10643         rc = -EBUSY;
10644         for (i = 0; i < count; i++) {
10645                 udelay(5);
10646                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10647                 if (val & MCPR_NVM_COMMAND_DONE) {
10648                         rc = 0;
10649                         break;
10650                 }
10651         }
10652
10653         return rc;
10654 }
10655
10656 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
10657
10658 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10659                               int buf_size)
10660 {
10661         int rc;
10662         u32 cmd_flags;
10663         u32 align_offset;
10664         __be32 val;
10665
10666         if (offset + buf_size > bp->common.flash_size) {
10667                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10668                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10669                    offset, buf_size, bp->common.flash_size);
10670                 return -EINVAL;
10671         }
10672
10673         /* request access to nvram interface */
10674         rc = bnx2x_acquire_nvram_lock(bp);
10675         if (rc)
10676                 return rc;
10677
10678         /* enable access to nvram interface */
10679         bnx2x_enable_nvram_access(bp);
10680
10681         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10682         align_offset = (offset & ~0x03);
10683         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10684
10685         if (rc == 0) {
10686                 val &= ~(0xff << BYTE_OFFSET(offset));
10687                 val |= (*data_buf << BYTE_OFFSET(offset));
10688
10689                 /* NVRAM data is returned as an array of bytes;
10690                  * convert it back to CPU order */
10691                 val = be32_to_cpu(val);
10692
10693                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10694                                              cmd_flags);
10695         }
10696
10697         /* disable access to nvram interface */
10698         bnx2x_disable_nvram_access(bp);
10699         bnx2x_release_nvram_lock(bp);
10700
10701         return rc;
10702 }
10703
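/*
 * Editor's worked example (compiled out): to write the single byte 0xab
 * at offset 6, bnx2x_nvram_write1() above reads the dword at aligned
 * offset 4, clears bits 23:16 (BYTE_OFFSET(6) == 16) and ORs in
 * 0xab << 16 before writing the dword back.
 */
#if 0
static u32 example_patch_byte(u32 dword, u32 offset, u8 byte)
{
        dword &= ~(0xff << BYTE_OFFSET(offset));
        dword |= ((u32)byte << BYTE_OFFSET(offset));
        return dword;
}
#endif
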
10704 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10705                              int buf_size)
10706 {
10707         int rc;
10708         u32 cmd_flags;
10709         u32 val;
10710         u32 written_so_far;
10711
10712         if (buf_size == 1)      /* single-byte write from ethtool */
10713                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10714
10715         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10716                 DP(BNX2X_MSG_NVM,
10717                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10718                    offset, buf_size);
10719                 return -EINVAL;
10720         }
10721
10722         if (offset + buf_size > bp->common.flash_size) {
10723                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10724                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10725                    offset, buf_size, bp->common.flash_size);
10726                 return -EINVAL;
10727         }
10728
10729         /* request access to nvram interface */
10730         rc = bnx2x_acquire_nvram_lock(bp);
10731         if (rc)
10732                 return rc;
10733
10734         /* enable access to nvram interface */
10735         bnx2x_enable_nvram_access(bp);
10736
10737         written_so_far = 0;
10738         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10739         while ((written_so_far < buf_size) && (rc == 0)) {
10740                 if (written_so_far == (buf_size - sizeof(u32)))
10741                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10742                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10743                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10744                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10745                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10746
10747                 memcpy(&val, data_buf, 4);
10748
10749                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10750
10751                 /* advance to the next dword */
10752                 offset += sizeof(u32);
10753                 data_buf += sizeof(u32);
10754                 written_so_far += sizeof(u32);
10755                 cmd_flags = 0;
10756         }
10757
10758         /* disable access to nvram interface */
10759         bnx2x_disable_nvram_access(bp);
10760         bnx2x_release_nvram_lock(bp);
10761
10762         return rc;
10763 }
10764
10765 static int bnx2x_set_eeprom(struct net_device *dev,
10766                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10767 {
10768         struct bnx2x *bp = netdev_priv(dev);
10769         int port = BP_PORT(bp);
10770         int rc = 0;
10771
10772         if (!netif_running(dev))
10773                 return -EAGAIN;
10774
10775         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10776            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10777            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10778            eeprom->len, eeprom->len);
10779
10780         /* parameters already validated in ethtool_set_eeprom */
10781
10782         /* PHY EEPROM can be accessed only by the PMF */
10783         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10784             !bp->port.pmf)
10785                 return -EINVAL;
10786
10787         if (eeprom->magic == 0x50485950) {
10788                 /* 'PHYP' (0x50485950): prepare PHY for FW upgrade */
10789                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10790
10791                 bnx2x_acquire_phy_lock(bp);
10792                 rc |= bnx2x_link_reset(&bp->link_params,
10793                                        &bp->link_vars, 0);
10794                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10795                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10796                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10797                                        MISC_REGISTERS_GPIO_HIGH, port);
10798                 bnx2x_release_phy_lock(bp);
10799                 bnx2x_link_report(bp);
10800
10801         } else if (eeprom->magic == 0x50485952) {
10802                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10803                 if (bp->state == BNX2X_STATE_OPEN) {
10804                         bnx2x_acquire_phy_lock(bp);
10805                         rc |= bnx2x_link_reset(&bp->link_params,
10806                                                &bp->link_vars, 1);
10807
10808                         rc |= bnx2x_phy_init(&bp->link_params,
10809                                              &bp->link_vars);
10810                         bnx2x_release_phy_lock(bp);
10811                         bnx2x_calc_fc_adv(bp);
10812                 }
10813         } else if (eeprom->magic == 0x53985943) {
10814                 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10815                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10816                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10817                         u8 ext_phy_addr =
10818                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10819
10820                         /* DSP Remove Download Mode */
10821                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10822                                        MISC_REGISTERS_GPIO_LOW, port);
10823
10824                         bnx2x_acquire_phy_lock(bp);
10825
10826                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10827
10828                         /* wait 0.5 sec to allow it to run */
10829                         msleep(500);
10830                         bnx2x_ext_phy_hw_reset(bp, port);
10831                         msleep(500);
10832                         bnx2x_release_phy_lock(bp);
10833                 }
10834         } else
10835                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10836
10837         return rc;
10838 }
10839
10840 static int bnx2x_get_coalesce(struct net_device *dev,
10841                               struct ethtool_coalesce *coal)
10842 {
10843         struct bnx2x *bp = netdev_priv(dev);
10844
10845         memset(coal, 0, sizeof(struct ethtool_coalesce));
10846
10847         coal->rx_coalesce_usecs = bp->rx_ticks;
10848         coal->tx_coalesce_usecs = bp->tx_ticks;
10849
10850         return 0;
10851 }
10852
10853 static int bnx2x_set_coalesce(struct net_device *dev,
10854                               struct ethtool_coalesce *coal)
10855 {
10856         struct bnx2x *bp = netdev_priv(dev);
10857
10858         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10859         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10860                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10861
10862         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10863         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10864                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10865
10866         if (netif_running(dev))
10867                 bnx2x_update_coalesce(bp);
10868
10869         return 0;
10870 }
10871
10872 static void bnx2x_get_ringparam(struct net_device *dev,
10873                                 struct ethtool_ringparam *ering)
10874 {
10875         struct bnx2x *bp = netdev_priv(dev);
10876
10877         ering->rx_max_pending = MAX_RX_AVAIL;
10878         ering->rx_mini_max_pending = 0;
10879         ering->rx_jumbo_max_pending = 0;
10880
10881         ering->rx_pending = bp->rx_ring_size;
10882         ering->rx_mini_pending = 0;
10883         ering->rx_jumbo_pending = 0;
10884
10885         ering->tx_max_pending = MAX_TX_AVAIL;
10886         ering->tx_pending = bp->tx_ring_size;
10887 }
10888
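      /* Changing the ring sizes takes effect only through a full
       * unload/load cycle.  The TX ring is required to exceed
       * MAX_SKB_FRAGS + 4, presumably so a maximally fragmented skb plus
       * its control BDs always fits.
       */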
10889 static int bnx2x_set_ringparam(struct net_device *dev,
10890                                struct ethtool_ringparam *ering)
10891 {
10892         struct bnx2x *bp = netdev_priv(dev);
10893         int rc = 0;
10894
10895         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10896                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10897                 return -EAGAIN;
10898         }
10899
10900         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10901             (ering->tx_pending > MAX_TX_AVAIL) ||
10902             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10903                 return -EINVAL;
10904
10905         bp->rx_ring_size = ering->rx_pending;
10906         bp->tx_ring_size = ering->tx_pending;
10907
10908         if (netif_running(dev)) {
10909                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10910                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10911         }
10912
10913         return rc;
10914 }
10915
10916 static void bnx2x_get_pauseparam(struct net_device *dev,
10917                                  struct ethtool_pauseparam *epause)
10918 {
10919         struct bnx2x *bp = netdev_priv(dev);
10920
10921         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10922                            BNX2X_FLOW_CTRL_AUTO) &&
10923                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10924
10925         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10926                             BNX2X_FLOW_CTRL_RX);
10927         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10928                             BNX2X_FLOW_CTRL_TX);
10929
10930         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10931            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10932            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10933 }
10934
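      /* Pause settings are per port and are therefore ignored in E1H
       * multi-function mode.  req_flow_ctrl is rebuilt from the rx/tx
       * flags; AUTO is kept only when autoneg is requested and the line
       * speed is set to autoneg as well.
       */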
10935 static int bnx2x_set_pauseparam(struct net_device *dev,
10936                                 struct ethtool_pauseparam *epause)
10937 {
10938         struct bnx2x *bp = netdev_priv(dev);
10939
10940         if (IS_E1HMF(bp))
10941                 return 0;
10942
10943         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10944            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10945            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10946
10947         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10948
10949         if (epause->rx_pause)
10950                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10951
10952         if (epause->tx_pause)
10953                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10954
10955         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10956                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10957
10958         if (epause->autoneg) {
10959                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10960                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10961                         return -EINVAL;
10962                 }
10963
10964                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10965                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10966         }
10967
10968         DP(NETIF_MSG_LINK,
10969            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10970
10971         if (netif_running(dev)) {
10972                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10973                 bnx2x_link_set(bp);
10974         }
10975
10976         return 0;
10977 }
10978
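      /* ethtool set_flags: toggling LRO switches the TPA (HW LRO) feature,
       * which depends on Rx checksumming and takes effect only after an
       * unload/load cycle of the NIC.
       */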
10979 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10980 {
10981         struct bnx2x *bp = netdev_priv(dev);
10982         int changed = 0;
10983         int rc = 0;
10984
10985         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10986                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10987                 return -EAGAIN;
10988         }
10989
10990         /* TPA requires Rx CSUM offloading */
10991         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10992                 if (!disable_tpa) {
10993                         if (!(dev->features & NETIF_F_LRO)) {
10994                                 dev->features |= NETIF_F_LRO;
10995                                 bp->flags |= TPA_ENABLE_FLAG;
10996                                 changed = 1;
10997                         }
10998                 } else
10999                         rc = -EINVAL;
11000         } else if (dev->features & NETIF_F_LRO) {
11001                 dev->features &= ~NETIF_F_LRO;
11002                 bp->flags &= ~TPA_ENABLE_FLAG;
11003                 changed = 1;
11004         }
11005
11006         if (data & ETH_FLAG_RXHASH)
11007                 dev->features |= NETIF_F_RXHASH;
11008         else
11009                 dev->features &= ~NETIF_F_RXHASH;
11010
11011         if (changed && netif_running(dev)) {
11012                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11013                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11014         }
11015
11016         return rc;
11017 }
11018
11019 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11020 {
11021         struct bnx2x *bp = netdev_priv(dev);
11022
11023         return bp->rx_csum;
11024 }
11025
11026 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11027 {
11028         struct bnx2x *bp = netdev_priv(dev);
11029         int rc = 0;
11030
11031         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11032                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11033                 return -EAGAIN;
11034         }
11035
11036         bp->rx_csum = data;
11037
11038         /* Disable TPA when Rx CSUM is disabled; otherwise all
11039            TPA'ed packets would be discarded due to a wrong TCP CSUM */
11040         if (!data) {
11041                 u32 flags = ethtool_op_get_flags(dev);
11042
11043                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11044         }
11045
11046         return rc;
11047 }
11048
11049 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11050 {
11051         if (data) {
11052                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11053                 dev->features |= NETIF_F_TSO6;
11054         } else {
11055                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11056                 dev->features &= ~NETIF_F_TSO6;
11057         }
11058
11059         return 0;
11060 }
11061
11062 static const struct {
11063         char string[ETH_GSTRING_LEN];
11064 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11065         { "register_test (offline)" },
11066         { "memory_test (offline)" },
11067         { "loopback_test (offline)" },
11068         { "nvram_test (online)" },
11069         { "interrupt_test (online)" },
11070         { "link_test (online)" },
11071         { "idle check (online)" }
11072 };
11073
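      /* Register self-test.  In reg_tbl, offset0 is the port 0 register,
       * offset1 the stride to the port 1 copy and mask the writable bits:
       * each register is written with 0x0 and then 0xffffffff, read back
       * through the mask, and restored.
       */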
11074 static int bnx2x_test_registers(struct bnx2x *bp)
11075 {
11076         int idx, i, rc = -ENODEV;
11077         u32 wr_val = 0;
11078         int port = BP_PORT(bp);
11079         static const struct {
11080                 u32 offset0;
11081                 u32 offset1;
11082                 u32 mask;
11083         } reg_tbl[] = {
11084 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11085                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11086                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11087                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11088                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11089                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11090                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11091                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11092                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11093                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11094 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11095                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11096                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11097                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11098                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11099                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11100                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11101                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11102                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11103                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11104 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11105                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11106                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11107                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11108                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11109                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11110                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11111                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11112                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11113                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11114 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11115                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11116                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11117                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11118                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11119                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11120                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11121
11122                 { 0xffffffff, 0, 0x00000000 }
11123         };
11124
11125         if (!netif_running(bp->dev))
11126                 return rc;
11127
11128         /* Run the test twice:
11129            first writing 0x00000000, then writing 0xffffffff */
11130         for (idx = 0; idx < 2; idx++) {
11131
11132                 switch (idx) {
11133                 case 0:
11134                         wr_val = 0;
11135                         break;
11136                 case 1:
11137                         wr_val = 0xffffffff;
11138                         break;
11139                 }
11140
11141                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11142                         u32 offset, mask, save_val, val;
11143
11144                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11145                         mask = reg_tbl[i].mask;
11146
11147                         save_val = REG_RD(bp, offset);
11148
11149                         REG_WR(bp, offset, (wr_val & mask));
11150                         val = REG_RD(bp, offset);
11151
11152                         /* Restore the original register's value */
11153                         REG_WR(bp, offset, save_val);
11154
11155                         /* verify value is as expected */
11156                         if ((val & mask) != (wr_val & mask)) {
11157                                 DP(NETIF_MSG_PROBE,
11158                                    "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11159                                    offset, val, wr_val, mask);
11160                                 goto test_reg_exit;
11161                         }
11162                 }
11163         }
11164
11165         rc = 0;
11166
11167 test_reg_exit:
11168         return rc;
11169 }
11170
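      /* Memory self-test: read back every word of the listed memories,
       * then verify that the parity status registers show only the bits
       * expected for the chip (E1 vs E1H masks).
       */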
11171 static int bnx2x_test_memory(struct bnx2x *bp)
11172 {
11173         int i, j, rc = -ENODEV;
11174         u32 val;
11175         static const struct {
11176                 u32 offset;
11177                 int size;
11178         } mem_tbl[] = {
11179                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11180                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11181                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11182                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11183                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11184                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11185                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11186
11187                 { 0xffffffff, 0 }
11188         };
11189         static const struct {
11190                 char *name;
11191                 u32 offset;
11192                 u32 e1_mask;
11193                 u32 e1h_mask;
11194         } prty_tbl[] = {
11195                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11196                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11197                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11198                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11199                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11200                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11201
11202                 { NULL, 0xffffffff, 0, 0 }
11203         };
11204
11205         if (!netif_running(bp->dev))
11206                 return rc;
11207
11208         /* Go through all the memories */
11209         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11210                 for (j = 0; j < mem_tbl[i].size; j++)
11211                         REG_RD(bp, mem_tbl[i].offset + j*4);
11212
11213         /* Check the parity status */
11214         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11215                 val = REG_RD(bp, prty_tbl[i].offset);
11216                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11217                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11218                         DP(NETIF_MSG_HW,
11219                            "%s is 0x%x\n", prty_tbl[i].name, val);
11220                         goto test_mem_exit;
11221                 }
11222         }
11223
11224         rc = 0;
11225
11226 test_mem_exit:
11227         return rc;
11228 }
11229
11230 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11231 {
11232         int cnt = 1000;
11233
11234         if (link_up)
11235                 while (bnx2x_link_test(bp) && cnt--)
11236                         msleep(10);
11237 }
11238
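      /* Loopback self-test: send one self-addressed frame filled with a
       * known byte pattern as a start BD + parsing BD pair, ring the
       * doorbell, then poll the TX/RX consumer indices and compare the
       * received payload byte by byte.
       */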
11239 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11240 {
11241         unsigned int pkt_size, num_pkts, i;
11242         struct sk_buff *skb;
11243         unsigned char *packet;
11244         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11245         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11246         u16 tx_start_idx, tx_idx;
11247         u16 rx_start_idx, rx_idx;
11248         u16 pkt_prod, bd_prod;
11249         struct sw_tx_bd *tx_buf;
11250         struct eth_tx_start_bd *tx_start_bd;
11251         struct eth_tx_parse_bd *pbd = NULL;
11252         dma_addr_t mapping;
11253         union eth_rx_cqe *cqe;
11254         u8 cqe_fp_flags;
11255         struct sw_rx_bd *rx_buf;
11256         u16 len;
11257         int rc = -ENODEV;
11258
11259         /* check the loopback mode */
11260         switch (loopback_mode) {
11261         case BNX2X_PHY_LOOPBACK:
11262                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11263                         return -EINVAL;
11264                 break;
11265         case BNX2X_MAC_LOOPBACK:
11266                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11267                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11268                 break;
11269         default:
11270                 return -EINVAL;
11271         }
11272
11273         /* prepare the loopback packet */
11274         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11275                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11276         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11277         if (!skb) {
11278                 rc = -ENOMEM;
11279                 goto test_loopback_exit;
11280         }
11281         packet = skb_put(skb, pkt_size);
11282         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11283         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11284         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11285         for (i = ETH_HLEN; i < pkt_size; i++)
11286                 packet[i] = (unsigned char) (i & 0xff);
11287
11288         /* send the loopback packet */
11289         num_pkts = 0;
11290         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11291         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11292
11293         pkt_prod = fp_tx->tx_pkt_prod++;
11294         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11295         tx_buf->first_bd = fp_tx->tx_bd_prod;
11296         tx_buf->skb = skb;
11297         tx_buf->flags = 0;
11298
11299         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11300         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11301         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11302                                  skb_headlen(skb), DMA_TO_DEVICE);
11303         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11304         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11305         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11306         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11307         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11308         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11309         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11310                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11311
11312         /* turn on parsing and get a BD */
11313         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11314         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11315
11316         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11317
11318         wmb();
11319
11320         fp_tx->tx_db.data.prod += 2;
11321         barrier();
11322         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11323
11324         mmiowb();
11325
11326         num_pkts++;
11327         fp_tx->tx_bd_prod += 2; /* start + pbd */
11328
11329         udelay(100);
11330
11331         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11332         if (tx_idx != tx_start_idx + num_pkts)
11333                 goto test_loopback_exit;
11334
11335         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11336         if (rx_idx != rx_start_idx + num_pkts)
11337                 goto test_loopback_exit;
11338
11339         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11340         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
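                /* note: ETH_RX_ERROR_FALGS is the macro's actual spelling
                 * in the driver headers */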
11341         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11342                 goto test_loopback_rx_exit;
11343
11344         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11345         if (len != pkt_size)
11346                 goto test_loopback_rx_exit;
11347
11348         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11349         skb = rx_buf->skb;
11350         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11351         for (i = ETH_HLEN; i < pkt_size; i++)
11352                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11353                         goto test_loopback_rx_exit;
11354
11355         rc = 0;
11356
11357 test_loopback_rx_exit:
11358
11359         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11360         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11361         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11362         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11363
11364         /* Update producers */
11365         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11366                              fp_rx->rx_sge_prod);
11367
11368 test_loopback_exit:
11369         bp->link_params.loopback_mode = LOOPBACK_NONE;
11370
11371         return rc;
11372 }
11373
11374 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11375 {
11376         int rc = 0, res;
11377
11378         if (BP_NOMCP(bp))
11379                 return rc;
11380
11381         if (!netif_running(bp->dev))
11382                 return BNX2X_LOOPBACK_FAILED;
11383
11384         bnx2x_netif_stop(bp, 1);
11385         bnx2x_acquire_phy_lock(bp);
11386
11387         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11388         if (res) {
11389                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11390                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11391         }
11392
11393         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11394         if (res) {
11395                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11396                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11397         }
11398
11399         bnx2x_release_phy_lock(bp);
11400         bnx2x_netif_start(bp);
11401
11402         return rc;
11403 }
11404
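      /* CRC32 property: the CRC of a block that already contains its own
       * (inverted, little-endian) CRC32 equals this fixed residual.
       * bnx2x_test_nvram() below relies on it to validate NVRAM blocks.
       */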
11405 #define CRC32_RESIDUAL                  0xdebb20e3
11406
11407 static int bnx2x_test_nvram(struct bnx2x *bp)
11408 {
11409         static const struct {
11410                 int offset;
11411                 int size;
11412         } nvram_tbl[] = {
11413                 {     0,  0x14 }, /* bootstrap */
11414                 {  0x14,  0xec }, /* dir */
11415                 { 0x100, 0x350 }, /* manuf_info */
11416                 { 0x450,  0xf0 }, /* feature_info */
11417                 { 0x640,  0x64 }, /* upgrade_key_info */
11418                 { 0x6a4,  0x64 },
11419                 { 0x708,  0x70 }, /* manuf_key_info */
11420                 { 0x778,  0x70 },
11421                 {     0,     0 }
11422         };
11423         __be32 buf[0x350 / 4];
11424         u8 *data = (u8 *)buf;
11425         int i, rc;
11426         u32 magic, crc;
11427
11428         if (BP_NOMCP(bp))
11429                 return 0;
11430
11431         rc = bnx2x_nvram_read(bp, 0, data, 4);
11432         if (rc) {
11433                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11434                 goto test_nvram_exit;
11435         }
11436
11437         magic = be32_to_cpu(buf[0]);
11438         if (magic != 0x669955aa) {
11439                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11440                 rc = -ENODEV;
11441                 goto test_nvram_exit;
11442         }
11443
11444         for (i = 0; nvram_tbl[i].size; i++) {
11445
11446                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11447                                       nvram_tbl[i].size);
11448                 if (rc) {
11449                         DP(NETIF_MSG_PROBE,
11450                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11451                         goto test_nvram_exit;
11452                 }
11453
11454                 crc = ether_crc_le(nvram_tbl[i].size, data);
11455                 if (crc != CRC32_RESIDUAL) {
11456                         DP(NETIF_MSG_PROBE,
11457                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11458                         rc = -ENODEV;
11459                         goto test_nvram_exit;
11460                 }
11461         }
11462
11463 test_nvram_exit:
11464         return rc;
11465 }
11466
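      /* Interrupt self-test: post a benign SET_MAC ramrod on the slowpath
       * and wait up to ~100 ms for its completion interrupt to clear
       * set_mac_pending.
       */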
11467 static int bnx2x_test_intr(struct bnx2x *bp)
11468 {
11469         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11470         int i, rc;
11471
11472         if (!netif_running(bp->dev))
11473                 return -ENODEV;
11474
11475         config->hdr.length = 0;
11476         if (CHIP_IS_E1(bp))
11477                 /* use last unicast entries */
11478                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11479         else
11480                 config->hdr.offset = BP_FUNC(bp);
11481         config->hdr.client_id = bp->fp->cl_id;
11482         config->hdr.reserved1 = 0;
11483
11484         bp->set_mac_pending++;
11485         smp_wmb();
11486         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11487                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11488                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11489         if (rc == 0) {
11490                 for (i = 0; i < 10; i++) {
11491                         if (!bp->set_mac_pending)
11492                                 break;
11493                         smp_rmb();
11494                         msleep_interruptible(10);
11495                 }
11496                 if (i == 10)
11497                         rc = -ENODEV;
11498         }
11499
11500         return rc;
11501 }
11502
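      /* ethtool self-test.  The offline tests (registers, memory,
       * loopback) reload the NIC in diag mode and are not supported in
       * E1H MF mode; results land in buf[] in the order given by
       * bnx2x_tests_str_arr above.
       */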
11503 static void bnx2x_self_test(struct net_device *dev,
11504                             struct ethtool_test *etest, u64 *buf)
11505 {
11506         struct bnx2x *bp = netdev_priv(dev);
11507
11508         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11509                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11510                 etest->flags |= ETH_TEST_FL_FAILED;
11511                 return;
11512         }
11513
11514         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11515
11516         if (!netif_running(dev))
11517                 return;
11518
11519         /* offline tests are not supported in MF mode */
11520         if (IS_E1HMF(bp))
11521                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11522
11523         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11524                 int port = BP_PORT(bp);
11525                 u32 val;
11526                 u8 link_up;
11527
11528                 /* save current value of input enable for TX port IF */
11529                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11530                 /* disable input for TX port IF */
11531                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11532
11533                 link_up = (bnx2x_link_test(bp) == 0);
11534                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11535                 bnx2x_nic_load(bp, LOAD_DIAG);
11536                 /* wait until link state is restored */
11537                 bnx2x_wait_for_link(bp, link_up);
11538
11539                 if (bnx2x_test_registers(bp) != 0) {
11540                         buf[0] = 1;
11541                         etest->flags |= ETH_TEST_FL_FAILED;
11542                 }
11543                 if (bnx2x_test_memory(bp) != 0) {
11544                         buf[1] = 1;
11545                         etest->flags |= ETH_TEST_FL_FAILED;
11546                 }
11547                 buf[2] = bnx2x_test_loopback(bp, link_up);
11548                 if (buf[2] != 0)
11549                         etest->flags |= ETH_TEST_FL_FAILED;
11550
11551                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11552
11553                 /* restore input for TX port IF */
11554                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11555
11556                 bnx2x_nic_load(bp, LOAD_NORMAL);
11557                 /* wait until link state is restored */
11558                 bnx2x_wait_for_link(bp, link_up);
11559         }
11560         if (bnx2x_test_nvram(bp) != 0) {
11561                 buf[3] = 1;
11562                 etest->flags |= ETH_TEST_FL_FAILED;
11563         }
11564         if (bnx2x_test_intr(bp) != 0) {
11565                 buf[4] = 1;
11566                 etest->flags |= ETH_TEST_FL_FAILED;
11567         }
11568         if (bp->port.pmf)
11569                 if (bnx2x_link_test(bp) != 0) {
11570                         buf[5] = 1;
11571                         etest->flags |= ETH_TEST_FL_FAILED;
11572                 }
11573
11574 #ifdef BNX2X_EXTRA_DEBUG
11575         bnx2x_panic_dump(bp);
11576 #endif
11577 }
11578
11579 static const struct {
11580         long offset;
11581         int size;
11582         u8 string[ETH_GSTRING_LEN];
11583 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11584 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11585         { Q_STATS_OFFSET32(error_bytes_received_hi),
11586                                                 8, "[%d]: rx_error_bytes" },
11587         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11588                                                 8, "[%d]: rx_ucast_packets" },
11589         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11590                                                 8, "[%d]: rx_mcast_packets" },
11591         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11592                                                 8, "[%d]: rx_bcast_packets" },
11593         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11594         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11595                                          4, "[%d]: rx_phy_ip_err_discards"},
11596         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11597                                          4, "[%d]: rx_skb_alloc_discard" },
11598         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11599
11600 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11601         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11602                                                 8, "[%d]: tx_ucast_packets" },
11603         { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11604                                                 8, "[%d]: tx_mcast_packets" },
11605         { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11606                                                 8, "[%d]: tx_bcast_packets" }
11607 };
11608
11609 static const struct {
11610         long offset;
11611         int size;
11612         u32 flags;
11613 #define STATS_FLAGS_PORT                1
11614 #define STATS_FLAGS_FUNC                2
11615 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11616         u8 string[ETH_GSTRING_LEN];
11617 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11618 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11619                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11620         { STATS_OFFSET32(error_bytes_received_hi),
11621                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11622         { STATS_OFFSET32(total_unicast_packets_received_hi),
11623                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11624         { STATS_OFFSET32(total_multicast_packets_received_hi),
11625                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11626         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11627                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11628         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11629                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11630         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11631                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11632         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11633                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11634         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11635                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11636 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11637                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11638         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11639                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11640         { STATS_OFFSET32(no_buff_discard_hi),
11641                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11642         { STATS_OFFSET32(mac_filter_discard),
11643                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11644         { STATS_OFFSET32(xxoverflow_discard),
11645                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11646         { STATS_OFFSET32(brb_drop_hi),
11647                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11648         { STATS_OFFSET32(brb_truncate_hi),
11649                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11650         { STATS_OFFSET32(pause_frames_received_hi),
11651                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11652         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11653                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11654         { STATS_OFFSET32(nig_timer_max),
11655                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11656 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11657                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11658         { STATS_OFFSET32(rx_skb_alloc_failed),
11659                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11660         { STATS_OFFSET32(hw_csum_err),
11661                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11662
11663         { STATS_OFFSET32(total_bytes_transmitted_hi),
11664                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11665         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11666                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11667         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11668                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11669         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11670                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11671         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11672                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11673         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11674                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11675         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11676                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11677 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11678                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11679         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11680                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11681         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11682                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11683         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11684                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11685         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11686                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11687         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11688                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11689         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11690                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11691         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11692                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11693         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11694                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11695         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11696                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11697 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11698                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11699         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11700                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11701         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11702                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11703         { STATS_OFFSET32(pause_frames_sent_hi),
11704                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11705 };
11706
11707 #define IS_PORT_STAT(i) \
11708         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11709 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11710 #define IS_E1HMF_MODE_STAT(bp) \
11711                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11712
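      /* In E1H MF mode only function (not port) statistics are exposed
       * unless BNX2X_MSG_STATS is set in msg_enable; in multi-queue mode
       * the per-queue stats precede the global ones.
       */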
11713 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11714 {
11715         struct bnx2x *bp = netdev_priv(dev);
11716         int i, num_stats;
11717
11718         switch (stringset) {
11719         case ETH_SS_STATS:
11720                 if (is_multi(bp)) {
11721                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11722                         if (!IS_E1HMF_MODE_STAT(bp))
11723                                 num_stats += BNX2X_NUM_STATS;
11724                 } else {
11725                         if (IS_E1HMF_MODE_STAT(bp)) {
11726                                 num_stats = 0;
11727                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11728                                         if (IS_FUNC_STAT(i))
11729                                                 num_stats++;
11730                         } else
11731                                 num_stats = BNX2X_NUM_STATS;
11732                 }
11733                 return num_stats;
11734
11735         case ETH_SS_TEST:
11736                 return BNX2X_NUM_TESTS;
11737
11738         default:
11739                 return -EINVAL;
11740         }
11741 }
11742
11743 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11744 {
11745         struct bnx2x *bp = netdev_priv(dev);
11746         int i, j, k;
11747
11748         switch (stringset) {
11749         case ETH_SS_STATS:
11750                 if (is_multi(bp)) {
11751                         k = 0;
11752                         for_each_queue(bp, i) {
11753                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11754                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11755                                                 bnx2x_q_stats_arr[j].string, i);
11756                                 k += BNX2X_NUM_Q_STATS;
11757                         }
11758                         if (IS_E1HMF_MODE_STAT(bp))
11759                                 break;
11760                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11761                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11762                                        bnx2x_stats_arr[j].string);
11763                 } else {
11764                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11765                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11766                                         continue;
11767                                 strcpy(buf + j*ETH_GSTRING_LEN,
11768                                        bnx2x_stats_arr[i].string);
11769                                 j++;
11770                         }
11771                 }
11772                 break;
11773
11774         case ETH_SS_TEST:
11775                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11776                 break;
11777         }
11778 }
11779
11780 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11781                                     struct ethtool_stats *stats, u64 *buf)
11782 {
11783         struct bnx2x *bp = netdev_priv(dev);
11784         u32 *hw_stats, *offset;
11785         int i, j, k;
11786
11787         if (is_multi(bp)) {
11788                 k = 0;
11789                 for_each_queue(bp, i) {
11790                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11791                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11792                                 if (bnx2x_q_stats_arr[j].size == 0) {
11793                                         /* skip this counter */
11794                                         buf[k + j] = 0;
11795                                         continue;
11796                                 }
11797                                 offset = (hw_stats +
11798                                           bnx2x_q_stats_arr[j].offset);
11799                                 if (bnx2x_q_stats_arr[j].size == 4) {
11800                                         /* 4-byte counter */
11801                                         buf[k + j] = (u64) *offset;
11802                                         continue;
11803                                 }
11804                                 /* 8-byte counter */
11805                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11806                         }
11807                         k += BNX2X_NUM_Q_STATS;
11808                 }
11809                 if (IS_E1HMF_MODE_STAT(bp))
11810                         return;
11811                 hw_stats = (u32 *)&bp->eth_stats;
11812                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11813                         if (bnx2x_stats_arr[j].size == 0) {
11814                                 /* skip this counter */
11815                                 buf[k + j] = 0;
11816                                 continue;
11817                         }
11818                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11819                         if (bnx2x_stats_arr[j].size == 4) {
11820                                 /* 4-byte counter */
11821                                 buf[k + j] = (u64) *offset;
11822                                 continue;
11823                         }
11824                         /* 8-byte counter */
11825                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11826                 }
11827         } else {
11828                 hw_stats = (u32 *)&bp->eth_stats;
11829                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11830                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11831                                 continue;
11832                         if (bnx2x_stats_arr[i].size == 0) {
11833                                 /* skip this counter */
11834                                 buf[j] = 0;
11835                                 j++;
11836                                 continue;
11837                         }
11838                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11839                         if (bnx2x_stats_arr[i].size == 4) {
11840                                 /* 4-byte counter */
11841                                 buf[j] = (u64) *offset;
11842                                 j++;
11843                                 continue;
11844                         }
11845                         /* 8-byte counter */
11846                         buf[j] = HILO_U64(*offset, *(offset + 1));
11847                         j++;
11848                 }
11849         }
11850 }
11851
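      /* ethtool phys_id: blink the port LED for 'data' seconds (default 2)
       * by alternating between 1G operational mode and off every 500 ms,
       * then restore the LED to the current link state.
       */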
11852 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11853 {
11854         struct bnx2x *bp = netdev_priv(dev);
11855         int i;
11856
11857         if (!netif_running(dev))
11858                 return 0;
11859
11860         if (!bp->port.pmf)
11861                 return 0;
11862
11863         if (data == 0)
11864                 data = 2;
11865
11866         for (i = 0; i < (data * 2); i++) {
11867                 if ((i % 2) == 0)
11868                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11869                                       SPEED_1000);
11870                 else
11871                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11872
11873                 msleep_interruptible(500);
11874                 if (signal_pending(current))
11875                         break;
11876         }
11877
11878         if (bp->link_vars.link_up)
11879                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11880                               bp->link_vars.line_speed);
11881
11882         return 0;
11883 }
11884
11885 static const struct ethtool_ops bnx2x_ethtool_ops = {
11886         .get_settings           = bnx2x_get_settings,
11887         .set_settings           = bnx2x_set_settings,
11888         .get_drvinfo            = bnx2x_get_drvinfo,
11889         .get_regs_len           = bnx2x_get_regs_len,
11890         .get_regs               = bnx2x_get_regs,
11891         .get_wol                = bnx2x_get_wol,
11892         .set_wol                = bnx2x_set_wol,
11893         .get_msglevel           = bnx2x_get_msglevel,
11894         .set_msglevel           = bnx2x_set_msglevel,
11895         .nway_reset             = bnx2x_nway_reset,
11896         .get_link               = bnx2x_get_link,
11897         .get_eeprom_len         = bnx2x_get_eeprom_len,
11898         .get_eeprom             = bnx2x_get_eeprom,
11899         .set_eeprom             = bnx2x_set_eeprom,
11900         .get_coalesce           = bnx2x_get_coalesce,
11901         .set_coalesce           = bnx2x_set_coalesce,
11902         .get_ringparam          = bnx2x_get_ringparam,
11903         .set_ringparam          = bnx2x_set_ringparam,
11904         .get_pauseparam         = bnx2x_get_pauseparam,
11905         .set_pauseparam         = bnx2x_set_pauseparam,
11906         .get_rx_csum            = bnx2x_get_rx_csum,
11907         .set_rx_csum            = bnx2x_set_rx_csum,
11908         .get_tx_csum            = ethtool_op_get_tx_csum,
11909         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11910         .set_flags              = bnx2x_set_flags,
11911         .get_flags              = ethtool_op_get_flags,
11912         .get_sg                 = ethtool_op_get_sg,
11913         .set_sg                 = ethtool_op_set_sg,
11914         .get_tso                = ethtool_op_get_tso,
11915         .set_tso                = bnx2x_set_tso,
11916         .self_test              = bnx2x_self_test,
11917         .get_sset_count         = bnx2x_get_sset_count,
11918         .get_strings            = bnx2x_get_strings,
11919         .phys_id                = bnx2x_phys_id,
11920         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11921 };
11922
11923 /* end of ethtool_ops */
11924
11925 /****************************************************************************
11926 * General service functions
11927 ****************************************************************************/
11928
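      /* Move the device between D0 and D3hot via the PCI PM control
       * register.  Power stays on in D3hot when another instance still
       * has the device enabled or on emulation/FPGA platforms; PME is
       * armed if WoL is configured.
       */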
11929 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11930 {
11931         u16 pmcsr;
11932
11933         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11934
11935         switch (state) {
11936         case PCI_D0:
11937                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11938                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11939                                        PCI_PM_CTRL_PME_STATUS));
11940
11941                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11942                         /* delay required during transition out of D3hot */
11943                         msleep(20);
11944                 break;
11945
11946         case PCI_D3hot:
11947                 /* If there are other clients above us, don't
11948                    shut down the power */
11949                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11950                         return 0;
11951                 /* Don't shut down the power for emulation and FPGA */
11952                 if (CHIP_REV_IS_SLOW(bp))
11953                         return 0;
11954
11955                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11956                 pmcsr |= 3;
11957
11958                 if (bp->wol)
11959                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11960
11961                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11962                                       pmcsr);
11963
11964                 /* No more memory access after this point until
11965                  * device is brought back to D0.
11966                  */
11967                 break;
11968
11969         default:
11970                 return -EINVAL;
11971         }
11972         return 0;
11973 }
11974
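      /* The last element of each RCQ page appears to be a "next page"
       * pointer rather than a real CQE, so a status-block consumer index
       * that lands on it is advanced past it before the comparison.
       */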
11975 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11976 {
11977         u16 rx_cons_sb;
11978
11979         /* Tell compiler that status block fields can change */
11980         barrier();
11981         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11982         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11983                 rx_cons_sb++;
11984         return (fp->rx_comp_cons != rx_cons_sb);
11985 }
11986
11987 /*
11988  * net_device service functions
11989  */
11990
11991 static int bnx2x_poll(struct napi_struct *napi, int budget)
11992 {
11993         int work_done = 0;
11994         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11995                                                  napi);
11996         struct bnx2x *bp = fp->bp;
11997
11998         while (1) {
11999 #ifdef BNX2X_STOP_ON_ERROR
12000                 if (unlikely(bp->panic)) {
12001                         napi_complete(napi);
12002                         return 0;
12003                 }
12004 #endif
12005
12006                 if (bnx2x_has_tx_work(fp))
12007                         bnx2x_tx_int(fp);
12008
12009                 if (bnx2x_has_rx_work(fp)) {
12010                         work_done += bnx2x_rx_int(fp, budget - work_done);
12011
12012                         /* must not complete if we consumed full budget */
12013                         if (work_done >= budget)
12014                                 break;
12015                 }
12016
12017                 /* Fall out from the NAPI loop if needed */
12018                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12019                         bnx2x_update_fpsb_idx(fp);
12020                         /* bnx2x_has_rx_work() reads the status block, thus
12021                          * we need to ensure that status block indices have
12022                          * been actually read (bnx2x_update_fpsb_idx) prior
12023                          * to this check (bnx2x_has_rx_work) so that we won't
12024                          * write the "newer" value of the status block to IGU
12025                          * (if there was a DMA right after bnx2x_has_rx_work
12026                          * and if there is no rmb, the memory reading
12027                          * (bnx2x_update_fpsb_idx) may be postponed to right
12028                          * before bnx2x_ack_sb). In this case there will never
12029                          * be another interrupt until there is another update
12030                          * of the status block, while there is still unhandled work. */
12031                         rmb();
12032
12033                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12034                                 napi_complete(napi);
12035                                 /* Re-enable interrupts */
12036                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12037                                              le16_to_cpu(fp->fp_c_idx),
12038                                              IGU_INT_NOP, 1);
12039                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12040                                              le16_to_cpu(fp->fp_u_idx),
12041                                              IGU_INT_ENABLE, 1);
12042                                 break;
12043                         }
12044                 }
12045         }
12046
12047         return work_done;
12048 }
12049
12050
12051 /* we split the first BD into headers and data BDs
12052  * to ease the pain of our fellow microcode engineers
12053  * we use one mapping for both BDs
12054  * So far this has only been observed to happen
12055  * in Other Operating Systems(TM)
12056  */
12057 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12058                                    struct bnx2x_fastpath *fp,
12059                                    struct sw_tx_bd *tx_buf,
12060                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
12061                                    u16 bd_prod, int nbd)
12062 {
12063         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12064         struct eth_tx_bd *d_tx_bd;
12065         dma_addr_t mapping;
12066         int old_len = le16_to_cpu(h_tx_bd->nbytes);
12067
12068         /* first fix first BD */
12069         h_tx_bd->nbd = cpu_to_le16(nbd);
12070         h_tx_bd->nbytes = cpu_to_le16(hlen);
12071
12072         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12073            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12074            h_tx_bd->addr_lo, h_tx_bd->nbd);
12075
12076         /* now get a new data BD
12077          * (after the pbd) and fill it */
12078         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12079         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12080
12081         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12082                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12083
12084         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12085         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12086         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12087
12088         /* this marks the BD as one that has no individual mapping */
12089         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12090
12091         DP(NETIF_MSG_TX_QUEUED,
12092            "TSO split data size is %d (%x:%x)\n",
12093            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12094
12095         /* update tx_bd */
12096         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12097
12098         return bd_prod;
12099 }
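
/* Illustrative example (numbers are hypothetical): for a TSO packet
 * whose linear part holds 200 bytes, of which hlen = 66 are headers,
 * bnx2x_tx_split() turns the single start BD
 *
 *      [addr,      nbytes = 200]
 *
 * into a headers BD and a data BD sharing one DMA mapping:
 *
 *      [addr,      nbytes =  66]   <- start BD (headers)
 *      [addr + 66, nbytes = 134]   <- new data BD
 *
 * BNX2X_TSO_SPLIT_BD marks the packet so the free path knows the data
 * BD has no DMA mapping of its own.
 */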
12100
12101 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12102 {
12103         if (fix > 0)
12104                 csum = (u16) ~csum_fold(csum_sub(csum,
12105                                 csum_partial(t_header - fix, fix, 0)));
12106
12107         else if (fix < 0)
12108                 csum = (u16) ~csum_fold(csum_add(csum,
12109                                 csum_partial(t_header, -fix, 0)));
12110
12111         return swab16(csum);
12112 }
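
/* Worked example for bnx2x_csum_fix() (illustrative): suppose the
 * stack started its partial checksum two bytes before the transport
 * header, i.e. fix = 2.  The two extra bytes are summed with
 * csum_partial(t_header - 2, 2, 0) and subtracted via csum_sub()
 * before folding; a negative fix adds the missing bytes instead.
 * The swab16() matches the byte-swapped form in which the pseudo
 * checksum is stored in the parse BD (cf. pbd->tcp_pseudo_csum).
 */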
12113
12114 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12115 {
12116         u32 rc;
12117
12118         if (skb->ip_summed != CHECKSUM_PARTIAL)
12119                 rc = XMIT_PLAIN;
12120
12121         else {
12122                 if (skb->protocol == htons(ETH_P_IPV6)) {
12123                         rc = XMIT_CSUM_V6;
12124                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12125                                 rc |= XMIT_CSUM_TCP;
12126
12127                 } else {
12128                         rc = XMIT_CSUM_V4;
12129                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12130                                 rc |= XMIT_CSUM_TCP;
12131                 }
12132         }
12133
12134         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12135                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12136
12137         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12138                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12139
12140         return rc;
12141 }
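
/* Examples of the mapping above: a CHECKSUM_PARTIAL TCPv4 skb yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP; if it is also GSO (SKB_GSO_TCPV4) the
 * result is XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP.  A packet
 * without CHECKSUM_PARTIAL maps to XMIT_PLAIN unless the GSO flags
 * OR the checksum bits back in.
 */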
12142
12143 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12144 /* Check whether the packet requires linearization (i.e. it is too
12145    fragmented).  There is no need to check fragmentation if the page
12146    size > 8K, as the FW restrictions cannot be violated then. */
12147 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12148                              u32 xmit_type)
12149 {
12150         int to_copy = 0;
12151         int hlen = 0;
12152         int first_bd_sz = 0;
12153
12154         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12155         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12156
12157                 if (xmit_type & XMIT_GSO) {
12158                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12159                         /* Check if LSO packet needs to be copied:
12160                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12161                         int wnd_size = MAX_FETCH_BD - 3;
12162                         /* Number of windows to check */
12163                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12164                         int wnd_idx = 0;
12165                         int frag_idx = 0;
12166                         u32 wnd_sum = 0;
12167
12168                         /* Headers length */
12169                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12170                                 tcp_hdrlen(skb);
12171
12172                         /* Amount of data (w/o headers) on linear part of SKB */
12173                         first_bd_sz = skb_headlen(skb) - hlen;
12174
12175                         wnd_sum  = first_bd_sz;
12176
12177                         /* Calculate the first sum - it's special */
12178                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12179                                 wnd_sum +=
12180                                         skb_shinfo(skb)->frags[frag_idx].size;
12181
12182                         /* If there was data in the linear part - check it */
12183                         if (first_bd_sz > 0) {
12184                                 if (unlikely(wnd_sum < lso_mss)) {
12185                                         to_copy = 1;
12186                                         goto exit_lbl;
12187                                 }
12188
12189                                 wnd_sum -= first_bd_sz;
12190                         }
12191
12192                         /* Others are easier: run through the frag list and
12193                            check all windows */
12194                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12195                                 wnd_sum +=
12196                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12197
12198                                 if (unlikely(wnd_sum < lso_mss)) {
12199                                         to_copy = 1;
12200                                         break;
12201                                 }
12202                                 wnd_sum -=
12203                                         skb_shinfo(skb)->frags[wnd_idx].size;
12204                         }
12205                 } else {
12206                         /* a non-LSO packet that is too fragmented
12207                            must always be linearized */
12208                         to_copy = 1;
12209                 }
12210         }
12211
12212 exit_lbl:
12213         if (unlikely(to_copy))
12214                 DP(NETIF_MSG_TX_QUEUED,
12215                    "Linearization IS REQUIRED for %s packet. "
12216                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12217                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12218                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12219
12220         return to_copy;
12221 }
12222 #endif
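
/* Illustrative example of the sliding-window check above (all numbers
 * hypothetical): assume wnd_size = MAX_FETCH_BD - 3 = 10.  With
 * lso_mss = 1460 and frags of 100 bytes each, every window of 10
 * consecutive BDs covers at most 1000 bytes, so wnd_sum < lso_mss and
 * the skb must be linearized before it can be transmitted.
 */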
12223
12224 /* called with netif_tx_lock
12225  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12226  * netif_wake_queue()
12227  */
12228 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12229 {
12230         struct bnx2x *bp = netdev_priv(dev);
12231         struct bnx2x_fastpath *fp;
12232         struct netdev_queue *txq;
12233         struct sw_tx_bd *tx_buf;
12234         struct eth_tx_start_bd *tx_start_bd;
12235         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12236         struct eth_tx_parse_bd *pbd = NULL;
12237         u16 pkt_prod, bd_prod;
12238         int nbd, fp_index;
12239         dma_addr_t mapping;
12240         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12241         int i;
12242         u8 hlen = 0;
12243         __le16 pkt_size = 0;
12244         struct ethhdr *eth;
12245         u8 mac_type = UNICAST_ADDRESS;
12246
12247 #ifdef BNX2X_STOP_ON_ERROR
12248         if (unlikely(bp->panic))
12249                 return NETDEV_TX_BUSY;
12250 #endif
12251
12252         fp_index = skb_get_queue_mapping(skb);
12253         txq = netdev_get_tx_queue(dev, fp_index);
12254
12255         fp = &bp->fp[fp_index];
12256
12257         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12258                 fp->eth_q_stats.driver_xoff++;
12259                 netif_tx_stop_queue(txq);
12260                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12261                 return NETDEV_TX_BUSY;
12262         }
12263
12264         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12265            "  gso type %x  xmit_type %x\n",
12266            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12267            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12268
12269         eth = (struct ethhdr *)skb->data;
12270
12271         /* set flag according to packet type (UNICAST_ADDRESS is default) */
12272         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12273                 if (is_broadcast_ether_addr(eth->h_dest))
12274                         mac_type = BROADCAST_ADDRESS;
12275                 else
12276                         mac_type = MULTICAST_ADDRESS;
12277         }
12278
12279 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12280         /* First, check whether we need to linearize the skb (due to FW
12281            restrictions). No need to check fragmentation if the page size
12282            > 8K, as the FW restrictions cannot be violated then. */
12283         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12284                 /* Statistics of linearization */
12285                 bp->lin_cnt++;
12286                 if (skb_linearize(skb) != 0) {
12287                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12288                            "silently dropping this SKB\n");
12289                         dev_kfree_skb_any(skb);
12290                         return NETDEV_TX_OK;
12291                 }
12292         }
12293 #endif
12294
12295         /*
12296         Please read carefully. First we use one BD which we mark as start,
12297         then we have a parsing info BD (used for TSO or checksum),
12298         and only then we have the rest of the TSO BDs.
12299         (Don't forget to mark the last one as last,
12300         and to unmap only AFTER you write to the BD ...)
12301         And above all, all pbd sizes are in words - NOT in dwords!
12302         */
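
        /* Resulting per-packet descriptor layout (sketch):
         *
         *   start BD -> parse BD (pbd) -> data BD ... data BD (last)
         *
         * nbd counts the start BD, the parse BD and every frag BD;
         * a TSO header split (bnx2x_tx_split) adds one more BD.
         */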
12303
12304         pkt_prod = fp->tx_pkt_prod++;
12305         bd_prod = TX_BD(fp->tx_bd_prod);
12306
12307         /* get a tx_buf and first BD */
12308         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12309         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12310
12311         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12312         tx_start_bd->general_data =  (mac_type <<
12313                                         ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12314         /* header nbd */
12315         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12316
12317         /* remember the first BD of the packet */
12318         tx_buf->first_bd = fp->tx_bd_prod;
12319         tx_buf->skb = skb;
12320         tx_buf->flags = 0;
12321
12322         DP(NETIF_MSG_TX_QUEUED,
12323            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12324            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12325
12326 #ifdef BCM_VLAN
12327         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12328             (bp->flags & HW_VLAN_TX_FLAG)) {
12329                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12330                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12331         } else
12332 #endif
12333                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12334
12335         /* turn on parsing and get a BD */
12336         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12337         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12338
12339         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12340
12341         if (xmit_type & XMIT_CSUM) {
12342                 hlen = (skb_network_header(skb) - skb->data) / 2; /* in words */
12343
12344                 /* for now NS flag is not used in Linux */
12345                 pbd->global_data =
12346                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12347                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12348
12349                 pbd->ip_hlen = (skb_transport_header(skb) -
12350                                 skb_network_header(skb)) / 2;
12351
12352                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12353
12354                 pbd->total_hlen = cpu_to_le16(hlen);
12355                 hlen = hlen*2;
12356
12357                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12358
12359                 if (xmit_type & XMIT_CSUM_V4)
12360                         tx_start_bd->bd_flags.as_bitfield |=
12361                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12362                 else
12363                         tx_start_bd->bd_flags.as_bitfield |=
12364                                                 ETH_TX_BD_FLAGS_IPV6;
12365
12366                 if (xmit_type & XMIT_CSUM_TCP) {
12367                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12368
12369                 } else {
12370                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12371
12372                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12373
12374                         DP(NETIF_MSG_TX_QUEUED,
12375                            "hlen %d  fix %d  csum before fix %x\n",
12376                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12377
12378                         /* HW bug: fixup the CSUM */
12379                         pbd->tcp_pseudo_csum =
12380                                 bnx2x_csum_fix(skb_transport_header(skb),
12381                                                SKB_CS(skb), fix);
12382
12383                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12384                            pbd->tcp_pseudo_csum);
12385                 }
12386         }
12387
12388         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12389                                  skb_headlen(skb), DMA_TO_DEVICE);
12390
12391         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12392         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12393         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12394         tx_start_bd->nbd = cpu_to_le16(nbd);
12395         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12396         pkt_size = tx_start_bd->nbytes;
12397
12398         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12399            "  nbytes %d  flags %x  vlan %x\n",
12400            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12401            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12402            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12403
12404         if (xmit_type & XMIT_GSO) {
12405
12406                 DP(NETIF_MSG_TX_QUEUED,
12407                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12408                    skb->len, hlen, skb_headlen(skb),
12409                    skb_shinfo(skb)->gso_size);
12410
12411                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12412
12413                 if (unlikely(skb_headlen(skb) > hlen))
12414                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12415                                                  hlen, bd_prod, ++nbd);
12416
12417                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12418                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12419                 pbd->tcp_flags = pbd_tcp_flags(skb);
12420
12421                 if (xmit_type & XMIT_GSO_V4) {
12422                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12423                         pbd->tcp_pseudo_csum =
12424                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12425                                                           ip_hdr(skb)->daddr,
12426                                                           0, IPPROTO_TCP, 0));
12427
12428                 } else
12429                         pbd->tcp_pseudo_csum =
12430                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12431                                                         &ipv6_hdr(skb)->daddr,
12432                                                         0, IPPROTO_TCP, 0));
12433
12434                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12435         }
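
        /* For LSO the pseudo-header checksum is computed with a zero
         * length field (note the 0 passed to csum_tcpudp_magic /
         * csum_ipv6_magic above); per-segment lengths are added when
         * the headers are replicated for each segment, hence the
         * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag.
         */
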
12436         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12437
12438         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12439                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12440
12441                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12442                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12443                 if (total_pkt_bd == NULL)
12444                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12445
12446                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12447                                        frag->page_offset,
12448                                        frag->size, DMA_TO_DEVICE);
12449
12450                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12451                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12452                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12453                 le16_add_cpu(&pkt_size, frag->size);
12454
12455                 DP(NETIF_MSG_TX_QUEUED,
12456                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12457                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12458                    le16_to_cpu(tx_data_bd->nbytes));
12459         }
12460
12461         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12462
12463         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12464
12465         /* now send a tx doorbell, counting the next BD
12466          * if the packet contains or ends with it
12467          */
12468         if (TX_BD_POFF(bd_prod) < nbd)
12469                 nbd++;
12470
12471         if (total_pkt_bd != NULL)
12472                 total_pkt_bd->total_pkt_bytes = pkt_size;
12473
12474         if (pbd)
12475                 DP(NETIF_MSG_TX_QUEUED,
12476                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12477                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12478                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12479                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12480                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12481
12482         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12483
12484         /*
12485          * Make sure that the BD data is updated before updating the producer
12486          * since FW might read the BD right after the producer is updated.
12487          * This is only applicable for weak-ordered memory model archs such
12488          * as IA-64. The following barrier is also mandatory since the FW
12489          * assumes packets always have BDs.
12490          */
12491         wmb();
12492
12493         fp->tx_db.data.prod += nbd;
12494         barrier();
12495         DOORBELL(bp, fp->index, fp->tx_db.raw);
12496
12497         mmiowb();
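
        /* Ordering recap: wmb() publishes the BD writes before the
         * producer update, barrier() keeps the compiler from moving
         * the tx_db update past the DOORBELL() write, and mmiowb()
         * orders the MMIO doorbell with respect to writes done by
         * other CPUs before the tx lock is released.
         */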
12498
12499         fp->tx_bd_prod += nbd;
12500
12501         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12502                 netif_tx_stop_queue(txq);
12503
12504                 /* The paired memory barrier is in bnx2x_tx_int(); we have to
12505                  * keep the ordering of the set_bit() in netif_tx_stop_queue()
12506                  * and the read of fp->tx_bd_cons */
12507                 smp_mb();
12508
12509                 fp->eth_q_stats.driver_xoff++;
12510                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12511                         netif_tx_wake_queue(txq);
12512         }
12513         fp->tx_pkt++;
12514
12515         return NETDEV_TX_OK;
12516 }
12517
12518 /* called with rtnl_lock */
12519 static int bnx2x_open(struct net_device *dev)
12520 {
12521         struct bnx2x *bp = netdev_priv(dev);
12522
12523         netif_carrier_off(dev);
12524
12525         bnx2x_set_power_state(bp, PCI_D0);
12526
12527         if (!bnx2x_reset_is_done(bp)) {
12528                 do {
12529                         /* Reset the MCP mailbox sequence if there is an
12530                          * ongoing recovery
12531                          */
12532                         bp->fw_seq = 0;
12533
12534                         /* If this is the first function to load and "reset
12535                          * done" is still not cleared, a recovery is likely
12536                          * unfinished. We don't check the attention state as
12537                          * it may already have been cleared by a "common"
12538                          * reset, but we shall do "process kill" anyway.
12539                          */
12540                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12541                                 bnx2x_trylock_hw_lock(bp,
12542                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12543                                 (!bnx2x_leader_reset(bp))) {
12544                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12545                                 break;
12546                         }
12547
12548                         bnx2x_set_power_state(bp, PCI_D3hot);
12549
12550                         printk(KERN_ERR "%s: Recovery flow hasn't completed"
12551                         " properly yet. Try again later. If you still see"
12552                         " this message after a few retries then a power"
12553                         " cycle is required.\n", bp->dev->name);
12554
12555                         return -EAGAIN;
12556                 } while (0);
12557         }
12558
12559         bp->recovery_state = BNX2X_RECOVERY_DONE;
12560
12561         return bnx2x_nic_load(bp, LOAD_OPEN);
12562 }
12563
12564 /* called with rtnl_lock */
12565 static int bnx2x_close(struct net_device *dev)
12566 {
12567         struct bnx2x *bp = netdev_priv(dev);
12568
12569         /* Unload the driver, release IRQs */
12570         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12571         bnx2x_set_power_state(bp, PCI_D3hot);
12572
12573         return 0;
12574 }
12575
12576 /* called with netif_tx_lock from dev_mcast.c */
12577 static void bnx2x_set_rx_mode(struct net_device *dev)
12578 {
12579         struct bnx2x *bp = netdev_priv(dev);
12580         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12581         int port = BP_PORT(bp);
12582
12583         if (bp->state != BNX2X_STATE_OPEN) {
12584                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12585                 return;
12586         }
12587
12588         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12589
12590         if (dev->flags & IFF_PROMISC)
12591                 rx_mode = BNX2X_RX_MODE_PROMISC;
12592
12593         else if ((dev->flags & IFF_ALLMULTI) ||
12594                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12595                   CHIP_IS_E1(bp)))
12596                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12597
12598         else { /* some multicasts */
12599                 if (CHIP_IS_E1(bp)) {
12600                         int i, old, offset;
12601                         struct netdev_hw_addr *ha;
12602                         struct mac_configuration_cmd *config =
12603                                                 bnx2x_sp(bp, mcast_config);
12604
12605                         i = 0;
12606                         netdev_for_each_mc_addr(ha, dev) {
12607                                 config->config_table[i].
12608                                         cam_entry.msb_mac_addr =
12609                                         swab16(*(u16 *)&ha->addr[0]);
12610                                 config->config_table[i].
12611                                         cam_entry.middle_mac_addr =
12612                                         swab16(*(u16 *)&ha->addr[2]);
12613                                 config->config_table[i].
12614                                         cam_entry.lsb_mac_addr =
12615                                         swab16(*(u16 *)&ha->addr[4]);
12616                                 config->config_table[i].cam_entry.flags =
12617                                                         cpu_to_le16(port);
12618                                 config->config_table[i].
12619                                         target_table_entry.flags = 0;
12620                                 config->config_table[i].target_table_entry.
12621                                         clients_bit_vector =
12622                                                 cpu_to_le32(1 << BP_L_ID(bp));
12623                                 config->config_table[i].
12624                                         target_table_entry.vlan_id = 0;
12625
12626                                 DP(NETIF_MSG_IFUP,
12627                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12628                                    config->config_table[i].
12629                                                 cam_entry.msb_mac_addr,
12630                                    config->config_table[i].
12631                                                 cam_entry.middle_mac_addr,
12632                                    config->config_table[i].
12633                                                 cam_entry.lsb_mac_addr);
12634                                 i++;
12635                         }
12636                         old = config->hdr.length;
12637                         if (old > i) {
12638                                 for (; i < old; i++) {
12639                                         if (CAM_IS_INVALID(config->
12640                                                            config_table[i])) {
12641                                                 /* already invalidated */
12642                                                 break;
12643                                         }
12644                                         /* invalidate */
12645                                         CAM_INVALIDATE(config->
12646                                                        config_table[i]);
12647                                 }
12648                         }
12649
12650                         if (CHIP_REV_IS_SLOW(bp))
12651                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12652                         else
12653                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12654
12655                         config->hdr.length = i;
12656                         config->hdr.offset = offset;
12657                         config->hdr.client_id = bp->fp->cl_id;
12658                         config->hdr.reserved1 = 0;
12659
12660                         bp->set_mac_pending++;
12661                         smp_wmb();
12662
12663                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12664                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12665                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12666                                       0);
12667                 } else { /* E1H */
12668                         /* Accept one or more multicasts */
12669                         struct netdev_hw_addr *ha;
12670                         u32 mc_filter[MC_HASH_SIZE];
12671                         u32 crc, bit, regidx;
12672                         int i;
12673
12674                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12675
12676                         netdev_for_each_mc_addr(ha, dev) {
12677                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12678                                    ha->addr);
12679
12680                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12681                                 bit = (crc >> 24) & 0xff;
12682                                 regidx = bit >> 5;
12683                                 bit &= 0x1f;
12684                                 mc_filter[regidx] |= (1 << bit);
12685                         }
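
                        /* Example with a hypothetical CRC value: if
                         * (crc >> 24) & 0xff = 0x9A = 154, then
                         * regidx = 154 >> 5 = 4 and bit = 154 & 0x1f
                         * = 26, i.e. bit 26 of mc_filter[4] is set -
                         * one of 256 hash bits spread over the
                         * MC_HASH_SIZE 32-bit registers.
                         */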
12686
12687                         for (i = 0; i < MC_HASH_SIZE; i++)
12688                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12689                                        mc_filter[i]);
12690                 }
12691         }
12692
12693         bp->rx_mode = rx_mode;
12694         bnx2x_set_storm_rx_mode(bp);
12695 }
12696
12697 /* called with rtnl_lock */
12698 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12699 {
12700         struct sockaddr *addr = p;
12701         struct bnx2x *bp = netdev_priv(dev);
12702
12703         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12704                 return -EINVAL;
12705
12706         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12707         if (netif_running(dev)) {
12708                 if (CHIP_IS_E1(bp))
12709                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12710                 else
12711                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12712         }
12713
12714         return 0;
12715 }
12716
12717 /* called with rtnl_lock */
12718 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12719                            int devad, u16 addr)
12720 {
12721         struct bnx2x *bp = netdev_priv(netdev);
12722         u16 value;
12723         int rc;
12724         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12725
12726         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12727            prtad, devad, addr);
12728
12729         if (prtad != bp->mdio.prtad) {
12730                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12731                    prtad, bp->mdio.prtad);
12732                 return -EINVAL;
12733         }
12734
12735         /* The HW expects different devad if CL22 is used */
12736         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12737
12738         bnx2x_acquire_phy_lock(bp);
12739         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12740                              devad, addr, &value);
12741         bnx2x_release_phy_lock(bp);
12742         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12743
12744         if (!rc)
12745                 rc = value;
12746         return rc;
12747 }
12748
12749 /* called with rtnl_lock */
12750 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12751                             u16 addr, u16 value)
12752 {
12753         struct bnx2x *bp = netdev_priv(netdev);
12754         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12755         int rc;
12756
12757         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12758                            " value 0x%x\n", prtad, devad, addr, value);
12759
12760         if (prtad != bp->mdio.prtad) {
12761                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12762                    prtad, bp->mdio.prtad);
12763                 return -EINVAL;
12764         }
12765
12766         /* The HW expects different devad if CL22 is used */
12767         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12768
12769         bnx2x_acquire_phy_lock(bp);
12770         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12771                               devad, addr, value);
12772         bnx2x_release_phy_lock(bp);
12773         return rc;
12774 }
12775
12776 /* called with rtnl_lock */
12777 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12778 {
12779         struct bnx2x *bp = netdev_priv(dev);
12780         struct mii_ioctl_data *mdio = if_mii(ifr);
12781
12782         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12783            mdio->phy_id, mdio->reg_num, mdio->val_in);
12784
12785         if (!netif_running(dev))
12786                 return -EAGAIN;
12787
12788         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12789 }
12790
12791 /* called with rtnl_lock */
12792 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12793 {
12794         struct bnx2x *bp = netdev_priv(dev);
12795         int rc = 0;
12796
12797         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12798                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12799                 return -EAGAIN;
12800         }
12801
12802         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12803             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12804                 return -EINVAL;
12805
12806         /* This does not race with packet allocation
12807          * because the actual alloc size is
12808          * only updated as part of load
12809          */
12810         dev->mtu = new_mtu;
12811
12812         if (netif_running(dev)) {
12813                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12814                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12815         }
12816
12817         return rc;
12818 }
12819
12820 static void bnx2x_tx_timeout(struct net_device *dev)
12821 {
12822         struct bnx2x *bp = netdev_priv(dev);
12823
12824 #ifdef BNX2X_STOP_ON_ERROR
12825         if (!bp->panic)
12826                 bnx2x_panic();
12827 #endif
12828         /* This allows the netif to be shut down gracefully before resetting */
12829         schedule_delayed_work(&bp->reset_task, 0);
12830 }
12831
12832 #ifdef BCM_VLAN
12833 /* called with rtnl_lock */
12834 static void bnx2x_vlan_rx_register(struct net_device *dev,
12835                                    struct vlan_group *vlgrp)
12836 {
12837         struct bnx2x *bp = netdev_priv(dev);
12838
12839         bp->vlgrp = vlgrp;
12840
12841         /* Set flags according to the required capabilities */
12842         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12843
12844         if (dev->features & NETIF_F_HW_VLAN_TX)
12845                 bp->flags |= HW_VLAN_TX_FLAG;
12846
12847         if (dev->features & NETIF_F_HW_VLAN_RX)
12848                 bp->flags |= HW_VLAN_RX_FLAG;
12849
12850         if (netif_running(dev))
12851                 bnx2x_set_client_config(bp);
12852 }
12853
12854 #endif
12855
12856 #ifdef CONFIG_NET_POLL_CONTROLLER
12857 static void poll_bnx2x(struct net_device *dev)
12858 {
12859         struct bnx2x *bp = netdev_priv(dev);
12860
12861         disable_irq(bp->pdev->irq);
12862         bnx2x_interrupt(bp->pdev->irq, dev);
12863         enable_irq(bp->pdev->irq);
12864 }
12865 #endif
12866
12867 static const struct net_device_ops bnx2x_netdev_ops = {
12868         .ndo_open               = bnx2x_open,
12869         .ndo_stop               = bnx2x_close,
12870         .ndo_start_xmit         = bnx2x_start_xmit,
12871         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12872         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12873         .ndo_validate_addr      = eth_validate_addr,
12874         .ndo_do_ioctl           = bnx2x_ioctl,
12875         .ndo_change_mtu         = bnx2x_change_mtu,
12876         .ndo_tx_timeout         = bnx2x_tx_timeout,
12877 #ifdef BCM_VLAN
12878         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12879 #endif
12880 #ifdef CONFIG_NET_POLL_CONTROLLER
12881         .ndo_poll_controller    = poll_bnx2x,
12882 #endif
12883 };
12884
12885 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12886                                     struct net_device *dev)
12887 {
12888         struct bnx2x *bp;
12889         int rc;
12890
12891         SET_NETDEV_DEV(dev, &pdev->dev);
12892         bp = netdev_priv(dev);
12893
12894         bp->dev = dev;
12895         bp->pdev = pdev;
12896         bp->flags = 0;
12897         bp->func = PCI_FUNC(pdev->devfn);
12898
12899         rc = pci_enable_device(pdev);
12900         if (rc) {
12901                 dev_err(&bp->pdev->dev,
12902                         "Cannot enable PCI device, aborting\n");
12903                 goto err_out;
12904         }
12905
12906         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12907                 dev_err(&bp->pdev->dev,
12908                         "Cannot find PCI device base address, aborting\n");
12909                 rc = -ENODEV;
12910                 goto err_out_disable;
12911         }
12912
12913         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12914                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12915                        " base address, aborting\n");
12916                 rc = -ENODEV;
12917                 goto err_out_disable;
12918         }
12919
12920         if (atomic_read(&pdev->enable_cnt) == 1) {
12921                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12922                 if (rc) {
12923                         dev_err(&bp->pdev->dev,
12924                                 "Cannot obtain PCI resources, aborting\n");
12925                         goto err_out_disable;
12926                 }
12927
12928                 pci_set_master(pdev);
12929                 pci_save_state(pdev);
12930         }
12931
12932         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12933         if (bp->pm_cap == 0) {
12934                 dev_err(&bp->pdev->dev,
12935                         "Cannot find power management capability, aborting\n");
12936                 rc = -EIO;
12937                 goto err_out_release;
12938         }
12939
12940         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12941         if (bp->pcie_cap == 0) {
12942                 dev_err(&bp->pdev->dev,
12943                         "Cannot find PCI Express capability, aborting\n");
12944                 rc = -EIO;
12945                 goto err_out_release;
12946         }
12947
12948         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12949                 bp->flags |= USING_DAC_FLAG;
12950                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12951                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12952                                " failed, aborting\n");
12953                         rc = -EIO;
12954                         goto err_out_release;
12955                 }
12956
12957         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12958                 dev_err(&bp->pdev->dev,
12959                         "System does not support DMA, aborting\n");
12960                 rc = -EIO;
12961                 goto err_out_release;
12962         }
12963
12964         dev->mem_start = pci_resource_start(pdev, 0);
12965         dev->base_addr = dev->mem_start;
12966         dev->mem_end = pci_resource_end(pdev, 0);
12967
12968         dev->irq = pdev->irq;
12969
12970         bp->regview = pci_ioremap_bar(pdev, 0);
12971         if (!bp->regview) {
12972                 dev_err(&bp->pdev->dev,
12973                         "Cannot map register space, aborting\n");
12974                 rc = -ENOMEM;
12975                 goto err_out_release;
12976         }
12977
12978         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12979                                         min_t(u64, BNX2X_DB_SIZE,
12980                                               pci_resource_len(pdev, 2)));
12981         if (!bp->doorbells) {
12982                 dev_err(&bp->pdev->dev,
12983                         "Cannot map doorbell space, aborting\n");
12984                 rc = -ENOMEM;
12985                 goto err_out_unmap;
12986         }
12987
12988         bnx2x_set_power_state(bp, PCI_D0);
12989
12990         /* clean indirect addresses */
12991         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12992                                PCICFG_VENDOR_ID_OFFSET);
12993         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12994         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12995         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12996         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12997
12998         /* Reset the load counter */
12999         bnx2x_clear_load_cnt(bp);
13000
13001         dev->watchdog_timeo = TX_TIMEOUT;
13002
13003         dev->netdev_ops = &bnx2x_netdev_ops;
13004         dev->ethtool_ops = &bnx2x_ethtool_ops;
13005         dev->features |= NETIF_F_SG;
13006         dev->features |= NETIF_F_HW_CSUM;
13007         if (bp->flags & USING_DAC_FLAG)
13008                 dev->features |= NETIF_F_HIGHDMA;
13009         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13010         dev->features |= NETIF_F_TSO6;
13011 #ifdef BCM_VLAN
13012         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13013         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13014
13015         dev->vlan_features |= NETIF_F_SG;
13016         dev->vlan_features |= NETIF_F_HW_CSUM;
13017         if (bp->flags & USING_DAC_FLAG)
13018                 dev->vlan_features |= NETIF_F_HIGHDMA;
13019         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13020         dev->vlan_features |= NETIF_F_TSO6;
13021 #endif
13022
13023         /* get_port_hwinfo() will set prtad and mmds properly */
13024         bp->mdio.prtad = MDIO_PRTAD_NONE;
13025         bp->mdio.mmds = 0;
13026         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13027         bp->mdio.dev = dev;
13028         bp->mdio.mdio_read = bnx2x_mdio_read;
13029         bp->mdio.mdio_write = bnx2x_mdio_write;
13030
13031         return 0;
13032
13033 err_out_unmap:
13034         if (bp->regview) {
13035                 iounmap(bp->regview);
13036                 bp->regview = NULL;
13037         }
13038         if (bp->doorbells) {
13039                 iounmap(bp->doorbells);
13040                 bp->doorbells = NULL;
13041         }
13042
13043 err_out_release:
13044         if (atomic_read(&pdev->enable_cnt) == 1)
13045                 pci_release_regions(pdev);
13046
13047 err_out_disable:
13048         pci_disable_device(pdev);
13049         pci_set_drvdata(pdev, NULL);
13050
13051 err_out:
13052         return rc;
13053 }
13054
13055 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13056                                                  int *width, int *speed)
13057 {
13058         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13059
13060         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13061
13062         /* return value of 1=2.5GHz 2=5GHz */
13063         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13064 }
13065
13066 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13067 {
13068         const struct firmware *firmware = bp->firmware;
13069         struct bnx2x_fw_file_hdr *fw_hdr;
13070         struct bnx2x_fw_file_section *sections;
13071         u32 offset, len, num_ops;
13072         u16 *ops_offsets;
13073         int i;
13074         const u8 *fw_ver;
13075
13076         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13077                 return -EINVAL;
13078
13079         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13080         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13081
13082         /* Make sure none of the offsets and sizes make us read beyond
13083          * the end of the firmware data */
13084         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13085                 offset = be32_to_cpu(sections[i].offset);
13086                 len = be32_to_cpu(sections[i].len);
13087                 if (offset + len > firmware->size) {
13088                         dev_err(&bp->pdev->dev,
13089                                 "Section %d length is out of bounds\n", i);
13090                         return -EINVAL;
13091                 }
13092         }
13093
13094         /* Likewise for the init_ops offsets */
13095         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13096         ops_offsets = (u16 *)(firmware->data + offset);
13097         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13098
13099         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13100                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13101                         dev_err(&bp->pdev->dev,
13102                                 "Section offset %d is out of bounds\n", i);
13103                         return -EINVAL;
13104                 }
13105         }
13106
13107         /* Check FW version */
13108         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13109         fw_ver = firmware->data + offset;
13110         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13111             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13112             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13113             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13114                 dev_err(&bp->pdev->dev,
13115                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13116                        fw_ver[0], fw_ver[1], fw_ver[2],
13117                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13118                        BCM_5710_FW_MINOR_VERSION,
13119                        BCM_5710_FW_REVISION_VERSION,
13120                        BCM_5710_FW_ENGINEERING_VERSION);
13121                 return -EINVAL;
13122         }
13123
13124         return 0;
13125 }
13126
13127 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13128 {
13129         const __be32 *source = (const __be32 *)_source;
13130         u32 *target = (u32 *)_target;
13131         u32 i;
13132
13133         for (i = 0; i < n/4; i++)
13134                 target[i] = be32_to_cpu(source[i]);
13135 }
13136
13137 /*
13138    Ops array is stored in the following format:
13139    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13140  */
13141 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13142 {
13143         const __be32 *source = (const __be32 *)_source;
13144         struct raw_op *target = (struct raw_op *)_target;
13145         u32 i, j, tmp;
13146
13147         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13148                 tmp = be32_to_cpu(source[j]);
13149                 target[i].op = (tmp >> 24) & 0xff;
13150                 target[i].offset = tmp & 0xffffff;
13151                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13152         }
13153 }
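
/* Example with illustrative values: if be32_to_cpu(source[j]) is
 * 0x02123456, the unpacked op is 0x02 and the offset is 0x123456;
 * raw_data is simply the byte-swapped source[j + 1].
 */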
13154
13155 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13156 {
13157         const __be16 *source = (const __be16 *)_source;
13158         u16 *target = (u16 *)_target;
13159         u32 i;
13160
13161         for (i = 0; i < n/2; i++)
13162                 target[i] = be16_to_cpu(source[i]);
13163 }
13164
13165 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13166 do {                                                                    \
13167         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13168         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13169         if (!bp->arr) {                                                 \
13170                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13171                 goto lbl;                                               \
13172         }                                                               \
13173         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13174              (u8 *)bp->arr, len);                                       \
13175 } while (0)
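
/* For instance, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) allocates bp->init_data with the length taken from
 * the firmware header and fills it with the byte-swapped blob found
 * at fw_hdr->init_data.offset, jumping to request_firmware_exit if
 * the allocation fails.
 */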
13176
13177 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13178 {
13179         const char *fw_file_name;
13180         struct bnx2x_fw_file_hdr *fw_hdr;
13181         int rc;
13182
13183         if (CHIP_IS_E1(bp))
13184                 fw_file_name = FW_FILE_NAME_E1;
13185         else if (CHIP_IS_E1H(bp))
13186                 fw_file_name = FW_FILE_NAME_E1H;
13187         else {
13188                 dev_err(dev, "Unsupported chip revision\n");
13189                 return -EINVAL;
13190         }
13191
13192         dev_info(dev, "Loading %s\n", fw_file_name);
13193
13194         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13195         if (rc) {
13196                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13197                 goto request_firmware_exit;
13198         }
13199
13200         rc = bnx2x_check_firmware(bp);
13201         if (rc) {
13202                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13203                 goto request_firmware_exit;
13204         }
13205
13206         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13207
13208         /* Initialize the pointers to the init arrays */
13209         /* Blob */
13210         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13211
13212         /* Opcodes */
13213         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13214
13215         /* Offsets */
13216         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13217                             be16_to_cpu_n);
13218
13219         /* STORMs firmware */
13220         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13221                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13222         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13223                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13224         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13225                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13226         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13227                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13228         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13229                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13230         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13231                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13232         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13233                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13234         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13235                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13236
13237         return 0;
13238
13239 init_offsets_alloc_err:
13240         kfree(bp->init_ops);
13241 init_ops_alloc_err:
13242         kfree(bp->init_data);
13243 request_firmware_exit:
13244         release_firmware(bp->firmware);
13245
13246         return rc;
13247 }
13248
13249
13250 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13251                                     const struct pci_device_id *ent)
13252 {
13253         struct net_device *dev = NULL;
13254         struct bnx2x *bp;
13255         int pcie_width, pcie_speed;
13256         int rc;
13257
13258         /* dev zeroed in init_etherdev */
13259         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13260         if (!dev) {
13261                 dev_err(&pdev->dev, "Cannot allocate net device\n");
13262                 return -ENOMEM;
13263         }
13264
13265         bp = netdev_priv(dev);
13266         bp->msg_enable = debug;
13267
13268         pci_set_drvdata(pdev, dev);
13269
13270         rc = bnx2x_init_dev(pdev, dev);
13271         if (rc < 0) {
13272                 free_netdev(dev);
13273                 return rc;
13274         }
13275
13276         rc = bnx2x_init_bp(bp);
13277         if (rc)
13278                 goto init_one_exit;
13279
13280         /* Set init arrays */
13281         rc = bnx2x_init_firmware(bp, &pdev->dev);
13282         if (rc) {
13283                 dev_err(&pdev->dev, "Error loading firmware\n");
13284                 goto init_one_exit;
13285         }
13286
13287         rc = register_netdev(dev);
13288         if (rc) {
13289                 dev_err(&pdev->dev, "Cannot register net device\n");
13290                 goto init_one_exit;
13291         }
13292
13293         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13294         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13295                " IRQ %d, ", board_info[ent->driver_data].name,
13296                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13297                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13298                dev->base_addr, bp->pdev->irq);
13299         pr_cont("node addr %pM\n", dev->dev_addr);
13300
13301         return 0;
13302
13303 init_one_exit:
13304         if (bp->regview)
13305                 iounmap(bp->regview);
13306
13307         if (bp->doorbells)
13308                 iounmap(bp->doorbells);
13309
13310         free_netdev(dev);
13311
13312         if (atomic_read(&pdev->enable_cnt) == 1)
13313                 pci_release_regions(pdev);
13314
13315         pci_disable_device(pdev);
13316         pci_set_drvdata(pdev, NULL);
13317
13318         return rc;
13319 }
13320
13321 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13322 {
13323         struct net_device *dev = pci_get_drvdata(pdev);
13324         struct bnx2x *bp;
13325
13326         if (!dev) {
13327                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13328                 return;
13329         }
13330         bp = netdev_priv(dev);
13331
13332         unregister_netdev(dev);
13333
13334         /* Make sure RESET task is not scheduled before continuing */
13335         cancel_delayed_work_sync(&bp->reset_task);
13336
13337         kfree(bp->init_ops_offsets);
13338         kfree(bp->init_ops);
13339         kfree(bp->init_data);
13340         release_firmware(bp->firmware);
13341
13342         if (bp->regview)
13343                 iounmap(bp->regview);
13344
13345         if (bp->doorbells)
13346                 iounmap(bp->doorbells);
13347
13348         free_netdev(dev);
13349
13350         if (atomic_read(&pdev->enable_cnt) == 1)
13351                 pci_release_regions(pdev);
13352
13353         pci_disable_device(pdev);
13354         pci_set_drvdata(pdev, NULL);
13355 }
13356
13357 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13358 {
13359         struct net_device *dev = pci_get_drvdata(pdev);
13360         struct bnx2x *bp;
13361
13362         if (!dev) {
13363                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13364                 return -ENODEV;
13365         }
13366         bp = netdev_priv(dev);
13367
13368         rtnl_lock();
13369
13370         pci_save_state(pdev);
13371
13372         if (!netif_running(dev)) {
13373                 rtnl_unlock();
13374                 return 0;
13375         }
13376
13377         netif_device_detach(dev);
13378
13379         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13380
13381         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13382
13383         rtnl_unlock();
13384
13385         return 0;
13386 }
13387
13388 static int bnx2x_resume(struct pci_dev *pdev)
13389 {
13390         struct net_device *dev = pci_get_drvdata(pdev);
13391         struct bnx2x *bp;
13392         int rc;
13393
13394         if (!dev) {
13395                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13396                 return -ENODEV;
13397         }
13398         bp = netdev_priv(dev);
13399
13400         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13401                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13402                 return -EAGAIN;
13403         }
13404
13405         rtnl_lock();
13406
13407         pci_restore_state(pdev);
13408
13409         if (!netif_running(dev)) {
13410                 rtnl_unlock();
13411                 return 0;
13412         }
13413
13414         bnx2x_set_power_state(bp, PCI_D0);
13415         netif_device_attach(dev);
13416
13417         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13418
13419         rtnl_unlock();
13420
13421         return rc;
13422 }
13423
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);
        netif_carrier_off(bp->dev);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        return 0;
}

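/*
 * Re-establish contact with the MCP after a slot reset: re-read the
 * shared memory base, check the validity signature and resync the
 * firmware sequence number.
 */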
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                printk(KERN_ERR "Handling parity error recovery. Try again later\n");
                return;
        }

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        int ret;

        pr_info("%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                pr_err("Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                pr_err("Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/*
 * Move queued CNIC slowpath entries onto the SPQ; @count denotes the
 * number of new completions we have seen since the last call.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}

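/*
 * Accept up to @count 16-byte KWQEs from CNIC onto the driver's
 * internal ring and kick the SPQ if there is room; returns the number
 * of entries actually queued.
 */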
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

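/*
 * Send a control command to CNIC.  Takes cnic_mutex, so it may only be
 * used in process context.
 */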
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

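/*
 * Bottom-half-safe variant of bnx2x_cnic_ctl_send(): the cnic_ops
 * pointer is protected by RCU instead of the mutex.
 */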
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

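/*
 * Forward a CFC completion for connection @cid to CNIC and account for
 * it on the SPQ.
 */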
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

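/*
 * Entry point for control requests from CNIC: context table writes,
 * SPQ completion accounting and L2 client start/stop.
 */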
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

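/*
 * Describe the IRQ layout to CNIC: slot 0 carries the CNIC status
 * block (on MSI-X vector 1 when MSI-X is in use), slot 1 the default
 * status block.
 */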
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

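/*
 * Called by CNIC to attach to this device: allocate the KWQE ring,
 * initialize the CNIC status block and the iSCSI MAC, then publish the
 * ops pointer for the RCU readers in bnx2x_cnic_ctl_send_bh().
 */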
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

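/*
 * Undo bnx2x_register_cnic(): clear the iSCSI MAC, NULL the ops pointer
 * and wait out in-flight RCU readers before freeing the KWQE ring.
 */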
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

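/**
 * bnx2x_cnic_probe - expose the CNIC glue for a device
 * @dev: net device to probe
 *
 * Fills in and returns the cnic_eth_dev structure CNIC uses to drive
 * this device: register mappings, context table geometry and the
 * callbacks defined above.
 */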
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */