/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-7"
#define DRV_MODULE_RELDATE      "2010/02/28"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

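/* Note: the two helpers above tunnel GRC accesses through the PCI config
 * space window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA), which is why they
 * work before the BAR-based register access and the DMAE engine are
 * usable.  A typical early-init use (illustrative only):
 *
 *      bnx2x_reg_wr_ind(bp, reg, val);
 *      val = bnx2x_reg_rd_ind(bp, reg);
 *
 * The trailing write of PCICFG_VENDOR_ID_OFFSET parks the window on a
 * harmless offset so later config cycles don't hit a stale GRC address.
 */
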
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

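/* The DMAE command memory behaves like a mailbox with 16 channels:
 * channel idx owns the struct dmae_command slot at
 * DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command), and writing 1 to
 * the matching GO register (dmae_reg_go_c[idx]) kicks the engine off on
 * that slot.  The full command image must therefore land in the slot
 * (the loop above) before the GO write is issued.
 */
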
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

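/* Completion is signalled by the engine writing DMAE_COMP_VAL into the
 * wb_comp slowpath word named by comp_addr above.  With cnt = 200 and a
 * 5us poll interval this budgets roughly 1ms on real silicon (and about
 * 20s with the 100ms emulation/FPGA delay) before "DMAE timeout!" is
 * reported and the loop gives up.
 */
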
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

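/* len counts 32-bit words while addr/phys_addr are byte based, hence
 * offset advances by DMAE_LEN32_WR_MAX * 4 per chunk.  E.g. assuming
 * DMAE_LEN32_WR_MAX were 0x400 (value illustrative), a 0x500-dword
 * write would split into one 0x400-dword DMAE at offset 0 followed by
 * one 0x100-dword DMAE at byte offset 0x1000.
 */
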
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

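/* Each STORM exports its assert list in internal memory as entries of
 * four consecutive u32 rows (hence the +4/+8/+12 reads above); a row0
 * equal to COMMON_ASM_INVALID_ASSERT_OPCODE terminates the walk.  The
 * return value is the total number of asserts found across all four
 * STORMs, so a non-zero result means the FW state is suspect.
 */
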
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

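/* The HC_REG_LEADING/TRAILING_EDGE registers select which attention
 * signals may trigger on a rising resp. falling edge.  In E1H
 * multi-function mode only this function's VN bit (BP_E1HVN + 4) is
 * added on top of the 0xee0f base mask, plus the NIG/GPIO3 attentions
 * (0x1100) when this function is the PMF; single-function mode simply
 * leaves all bits (0xffff) enabled, matching the code above.
 */
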
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

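/* Quiesce ordering here is deliberate: intr_sem goes up first so any
 * ISR that fires from now on returns early, the HC is then optionally
 * masked, synchronize_irq() waits out handlers already in flight on
 * every vector, and only then is the slowpath work cancelled and
 * flushed so it cannot be re-queued behind our back.
 */
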
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

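/* BD chain layout assumed above: the start BD (unmapped with
 * pci_unmap_single) is followed by a parse BD and, for TSO packets, a
 * split header BD; neither of those carries a DMA mapping, hence they
 * are skipped, and the remaining nbd fragment BDs are unmapped with
 * pci_unmap_page.  new_cons becomes the BD consumer index just past
 * this packet, which bnx2x_tx_int() feeds back into fp->tx_bd_cons.
 */
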
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

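/* Worked example (numbers illustrative): with tx_ring_size == 4000,
 * NUM_TX_RINGS == 16, prod == 100 and cons == 50, used is
 * 50 + 16 == 66, leaving 3934 BDs available.  Charging the "next-page"
 * BDs as permanently used keeps the producer from ever wrapping onto
 * them.
 */
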
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_wmb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent re-enabling the queue
                 * while it's empty. This could happen if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

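/* The SGE mask is one bit per ring entry, packed into u64 elements: a
 * set bit means the SGE is still posted to the FW, a cleared bit means
 * it has been consumed.  bnx2x_update_sge_prod() clears bits as pages
 * are used and only advances rx_sge_prod over elements that have gone
 * fully to zero; the two "next page" slots of every ring page are
 * pre-cleared since no buffer is ever posted there.
 */
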
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

1484         /*
1485          * Make sure that the BD and SGE data is updated before updating the
1486          * producers since FW might read the BD/SGE right after the producer
1487          * is updated.
1488          * This is only applicable for weak-ordered memory model archs such
1489          * as IA-64. The following barrier is also mandatory since the FW
1490          * assumes that BDs always have buffers.
1491          */
1492         wmb();
1493
1494         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1495                 REG_WR(bp, BAR_USTRORM_INTMEM +
1496                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1497                        ((u32 *)&rx_prods)[i]);
1498
1499         mmiowb(); /* keep prod updates ordered */
1500
1501         DP(NETIF_MSG_RX_STATUS,
1502            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1503            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1504 }
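/*
 * The update sequence above, in short (an illustrative sketch of the
 * ordering contract, not extra driver logic):
 *
 *	fill BD/SGE rings with valid buffers
 *	wmb()                    - buffers must be visible before producers
 *	REG_WR(producer words)   - FW may start fetching immediately
 *	mmiowb()                 - keep producer updates ordered on the bus
 */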
1505
1506 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1507 {
1508         struct bnx2x *bp = fp->bp;
1509         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1510         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1511         int rx_pkt = 0;
1512
1513 #ifdef BNX2X_STOP_ON_ERROR
1514         if (unlikely(bp->panic))
1515                 return 0;
1516 #endif
1517
1518         /* The CQ "next element" is the same size as a regular element,
1519            which is why a plain increment is OK here */
1520         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1521         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1522                 hw_comp_cons++;
1523
1524         bd_cons = fp->rx_bd_cons;
1525         bd_prod = fp->rx_bd_prod;
1526         bd_prod_fw = bd_prod;
1527         sw_comp_cons = fp->rx_comp_cons;
1528         sw_comp_prod = fp->rx_comp_prod;
1529
1530         /* Memory barrier necessary as speculative reads of the rx
1531          * buffer can be ahead of the index in the status block
1532          */
1533         rmb();
1534
1535         DP(NETIF_MSG_RX_STATUS,
1536            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1537            fp->index, hw_comp_cons, sw_comp_cons);
1538
1539         while (sw_comp_cons != hw_comp_cons) {
1540                 struct sw_rx_bd *rx_buf = NULL;
1541                 struct sk_buff *skb;
1542                 union eth_rx_cqe *cqe;
1543                 u8 cqe_fp_flags;
1544                 u16 len, pad;
1545
1546                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1547                 bd_prod = RX_BD(bd_prod);
1548                 bd_cons = RX_BD(bd_cons);
1549
1550                 /* Prefetch the page containing the BD descriptor
1551                    at the producer's index. It will be needed when a new skb
1552                    is allocated */
1553                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1554                                              (&fp->rx_desc_ring[bd_prod])) -
1555                                   PAGE_SIZE + 1));
1556
1557                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1558                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1559
1560                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1561                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1562                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1563                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1564                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1565                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1566
1567                 /* is this a slowpath msg? */
1568                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1569                         bnx2x_sp_event(fp, cqe);
1570                         goto next_cqe;
1571
1572                 /* this is an rx packet */
1573                 } else {
1574                         rx_buf = &fp->rx_buf_ring[bd_cons];
1575                         skb = rx_buf->skb;
1576                         prefetch(skb);
1577                         prefetch((u8 *)skb + 256);
1578                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1579                         pad = cqe->fast_path_cqe.placement_offset;
1580
1581                         /* If CQE is marked both TPA_START and TPA_END
1582                            it is a non-TPA CQE */
1583                         if ((!fp->disable_tpa) &&
1584                             (TPA_TYPE(cqe_fp_flags) !=
1585                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1586                                 u16 queue = cqe->fast_path_cqe.queue_index;
1587
1588                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1589                                         DP(NETIF_MSG_RX_STATUS,
1590                                            "calling tpa_start on queue %d\n",
1591                                            queue);
1592
1593                                         bnx2x_tpa_start(fp, queue, skb,
1594                                                         bd_cons, bd_prod);
1595                                         goto next_rx;
1596                                 }
1597
1598                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1599                                         DP(NETIF_MSG_RX_STATUS,
1600                                            "calling tpa_stop on queue %d\n",
1601                                            queue);
1602
1603                                         if (!BNX2X_RX_SUM_FIX(cqe))
1604                                                 BNX2X_ERR("STOP on non-TCP "
1605                                                           "data\n");
1606
1607                                         /* This is the size of the linear
1608                                            data on this skb */
1609                                         len = le16_to_cpu(cqe->fast_path_cqe.
1610                                                                 len_on_bd);
1611                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1612                                                     len, cqe, comp_ring_cons);
1613 #ifdef BNX2X_STOP_ON_ERROR
1614                                         if (bp->panic)
1615                                                 return 0;
1616 #endif
1617
1618                                         bnx2x_update_sge_prod(fp,
1619                                                         &cqe->fast_path_cqe);
1620                                         goto next_cqe;
1621                                 }
1622                         }
1623
1624                         pci_dma_sync_single_for_device(bp->pdev,
1625                                         pci_unmap_addr(rx_buf, mapping),
1626                                                        pad + RX_COPY_THRESH,
1627                                                        PCI_DMA_FROMDEVICE);
1628                         prefetch(skb);
1629                         prefetch(((char *)(skb)) + 128);
1630
1631                         /* is this an error packet? */
1632                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1633                                 DP(NETIF_MSG_RX_ERR,
1634                                    "ERROR  flags %x  rx packet %u\n",
1635                                    cqe_fp_flags, sw_comp_cons);
1636                                 fp->eth_q_stats.rx_err_discard_pkt++;
1637                                 goto reuse_rx;
1638                         }
1639
1640                         /* Since we don't have a jumbo ring,
1641                          * copy small packets if mtu > 1500
1642                          */
1643                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1644                             (len <= RX_COPY_THRESH)) {
1645                                 struct sk_buff *new_skb;
1646
1647                                 new_skb = netdev_alloc_skb(bp->dev,
1648                                                            len + pad);
1649                                 if (new_skb == NULL) {
1650                                         DP(NETIF_MSG_RX_ERR,
1651                                            "ERROR  packet dropped "
1652                                            "because of alloc failure\n");
1653                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1654                                         goto reuse_rx;
1655                                 }
1656
1657                                 /* aligned copy */
1658                                 skb_copy_from_linear_data_offset(skb, pad,
1659                                                     new_skb->data + pad, len);
1660                                 skb_reserve(new_skb, pad);
1661                                 skb_put(new_skb, len);
1662
1663                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1664
1665                                 skb = new_skb;
1666
1667                         } else
1668                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1669                                 pci_unmap_single(bp->pdev,
1670                                         pci_unmap_addr(rx_buf, mapping),
1671                                                  bp->rx_buf_size,
1672                                                  PCI_DMA_FROMDEVICE);
1673                                 skb_reserve(skb, pad);
1674                                 skb_put(skb, len);
1675
1676                         } else {
1677                                 DP(NETIF_MSG_RX_ERR,
1678                                    "ERROR  packet dropped because "
1679                                    "of alloc failure\n");
1680                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1681 reuse_rx:
1682                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1683                                 goto next_rx;
1684                         }
1685
1686                         skb->protocol = eth_type_trans(skb, bp->dev);
1687
1688                         skb->ip_summed = CHECKSUM_NONE;
1689                         if (bp->rx_csum) {
1690                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1691                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1692                                 else
1693                                         fp->eth_q_stats.hw_csum_err++;
1694                         }
1695                 }
1696
1697                 skb_record_rx_queue(skb, fp->index);
1698
1699 #ifdef BCM_VLAN
1700                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1701                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1702                      PARSING_FLAGS_VLAN))
1703                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1704                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1705                 else
1706 #endif
1707                         netif_receive_skb(skb);
1708
1709
1710 next_rx:
1711                 rx_buf->skb = NULL;
1712
1713                 bd_cons = NEXT_RX_IDX(bd_cons);
1714                 bd_prod = NEXT_RX_IDX(bd_prod);
1715                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1716                 rx_pkt++;
1717 next_cqe:
1718                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1719                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1720
1721                 if (rx_pkt == budget)
1722                         break;
1723         } /* while */
1724
1725         fp->rx_bd_cons = bd_cons;
1726         fp->rx_bd_prod = bd_prod_fw;
1727         fp->rx_comp_cons = sw_comp_cons;
1728         fp->rx_comp_prod = sw_comp_prod;
1729
1730         /* Update producers */
1731         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1732                              fp->rx_sge_prod);
1733
1734         fp->rx_pkt += rx_pkt;
1735         fp->rx_calls++;
1736
1737         return rx_pkt;
1738 }
1739
1740 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1741 {
1742         struct bnx2x_fastpath *fp = fp_cookie;
1743         struct bnx2x *bp = fp->bp;
1744
1745         /* Return here if interrupt is disabled */
1746         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1747                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1748                 return IRQ_HANDLED;
1749         }
1750
1751         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1752            fp->index, fp->sb_id);
1753         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1754
1755 #ifdef BNX2X_STOP_ON_ERROR
1756         if (unlikely(bp->panic))
1757                 return IRQ_HANDLED;
1758 #endif
1759
1760         /* Handle Rx and Tx according to MSI-X vector */
1761         prefetch(fp->rx_cons_sb);
1762         prefetch(fp->tx_cons_sb);
1763         prefetch(&fp->status_blk->u_status_block.status_block_index);
1764         prefetch(&fp->status_blk->c_status_block.status_block_index);
1765         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1766
1767         return IRQ_HANDLED;
1768 }
1769
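/*
 * Note on the status word decoded below (inferred from the masks used;
 * treat as a sketch): bit 0 belongs to the default status block and
 * kicks the slowpath task, while bit (sb_id + 1) - i.e. 0x2 << sb_id -
 * belongs to the fastpath status block with that sb_id.
 */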
1770 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1771 {
1772         struct bnx2x *bp = netdev_priv(dev_instance);
1773         u16 status = bnx2x_ack_int(bp);
1774         u16 mask;
1775         int i;
1776
1777         /* Return here if interrupt is shared and it's not for us */
1778         if (unlikely(status == 0)) {
1779                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1780                 return IRQ_NONE;
1781         }
1782         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1783
1784         /* Return here if interrupt is disabled */
1785         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1786                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1787                 return IRQ_HANDLED;
1788         }
1789
1790 #ifdef BNX2X_STOP_ON_ERROR
1791         if (unlikely(bp->panic))
1792                 return IRQ_HANDLED;
1793 #endif
1794
1795         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1796                 struct bnx2x_fastpath *fp = &bp->fp[i];
1797
1798                 mask = 0x2 << fp->sb_id;
1799                 if (status & mask) {
1800                         /* Handle Rx and Tx according to SB id */
1801                         prefetch(fp->rx_cons_sb);
1802                         prefetch(&fp->status_blk->u_status_block.
1803                                                 status_block_index);
1804                         prefetch(fp->tx_cons_sb);
1805                         prefetch(&fp->status_blk->c_status_block.
1806                                                 status_block_index);
1807                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1808                         status &= ~mask;
1809                 }
1810         }
1811
1812 #ifdef BCM_CNIC
1813         mask = 0x2 << CNIC_SB_ID(bp);
1814         if (status & (mask | 0x1)) {
1815                 struct cnic_ops *c_ops = NULL;
1816
1817                 rcu_read_lock();
1818                 c_ops = rcu_dereference(bp->cnic_ops);
1819                 if (c_ops)
1820                         c_ops->cnic_handler(bp->cnic_data, NULL);
1821                 rcu_read_unlock();
1822
1823                 status &= ~mask;
1824         }
1825 #endif
1826
1827         if (unlikely(status & 0x1)) {
1828                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1829
1830                 status &= ~0x1;
1831                 if (!status)
1832                         return IRQ_HANDLED;
1833         }
1834
1835         if (status)
1836                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1837                    status);
1838
1839         return IRQ_HANDLED;
1840 }
1841
1842 /* end of fast path */
1843
1844 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1845
1846 /* Link */
1847
1848 /*
1849  * General service functions
1850  */
1851
1852 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1853 {
1854         u32 lock_status;
1855         u32 resource_bit = (1 << resource);
1856         int func = BP_FUNC(bp);
1857         u32 hw_lock_control_reg;
1858         int cnt;
1859
1860         /* Validating that the resource is within range */
1861         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1862                 DP(NETIF_MSG_HW,
1863                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1864                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1865                 return -EINVAL;
1866         }
1867
1868         if (func <= 5) {
1869                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1870         } else {
1871                 hw_lock_control_reg =
1872                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1873         }
1874
1875         /* Validating that the resource is not already taken */
1876         lock_status = REG_RD(bp, hw_lock_control_reg);
1877         if (lock_status & resource_bit) {
1878                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1879                    lock_status, resource_bit);
1880                 return -EEXIST;
1881         }
1882
1883         /* Try for 5 seconds, polling every 5ms */
1884         for (cnt = 0; cnt < 1000; cnt++) {
1885                 /* Try to acquire the lock */
1886                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1887                 lock_status = REG_RD(bp, hw_lock_control_reg);
1888                 if (lock_status & resource_bit)
1889                         return 0;
1890
1891                 msleep(5);
1892         }
1893         DP(NETIF_MSG_HW, "Timeout\n");
1894         return -EAGAIN;
1895 }
1896
1897 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1898 {
1899         u32 lock_status;
1900         u32 resource_bit = (1 << resource);
1901         int func = BP_FUNC(bp);
1902         u32 hw_lock_control_reg;
1903
1904         /* Validating that the resource is within range */
1905         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1906                 DP(NETIF_MSG_HW,
1907                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1908                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1909                 return -EINVAL;
1910         }
1911
1912         if (func <= 5) {
1913                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1914         } else {
1915                 hw_lock_control_reg =
1916                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1917         }
1918
1919         /* Validating that the resource is currently taken */
1920         lock_status = REG_RD(bp, hw_lock_control_reg);
1921         if (!(lock_status & resource_bit)) {
1922                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1923                    lock_status, resource_bit);
1924                 return -EFAULT;
1925         }
1926
1927         REG_WR(bp, hw_lock_control_reg, resource_bit);
1928         return 0;
1929 }
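/*
 * Typical usage of the HW lock pair above (an illustrative sketch; the
 * GPIO/SPIO helpers below follow this pattern):
 *
 *	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... read-modify-write the shared MISC registers ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */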
1930
1931 /* HW Lock for shared dual port PHYs */
1932 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1933 {
1934         mutex_lock(&bp->port.phy_mutex);
1935
1936         if (bp->port.need_hw_lock)
1937                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1938 }
1939
1940 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1941 {
1942         if (bp->port.need_hw_lock)
1943                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1944
1945         mutex_unlock(&bp->port.phy_mutex);
1946 }
1947
1948 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1949 {
1950         /* The GPIO should be swapped if swap register is set and active */
1951         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1952                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1953         int gpio_shift = gpio_num +
1954                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1955         u32 gpio_mask = (1 << gpio_shift);
1956         u32 gpio_reg;
1957         int value;
1958
1959         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1960                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1961                 return -EINVAL;
1962         }
1963
1964         /* read GPIO value */
1965         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1966
1967         /* get the requested pin value */
1968         if ((gpio_reg & gpio_mask) == gpio_mask)
1969                 value = 1;
1970         else
1971                 value = 0;
1972
1973         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1974
1975         return value;
1976 }
1977
1978 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1979 {
1980         /* The GPIO should be swapped if swap register is set and active */
1981         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1982                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1983         int gpio_shift = gpio_num +
1984                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1985         u32 gpio_mask = (1 << gpio_shift);
1986         u32 gpio_reg;
1987
1988         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1989                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1990                 return -EINVAL;
1991         }
1992
1993         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1994         /* read GPIO and mask out all but the float bits */
1995         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1996
1997         switch (mode) {
1998         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1999                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2000                    gpio_num, gpio_shift);
2001                 /* clear FLOAT and set CLR */
2002                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2003                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2004                 break;
2005
2006         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2007                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2008                    gpio_num, gpio_shift);
2009                 /* clear FLOAT and set SET */
2010                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2011                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2012                 break;
2013
2014         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2015                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2016                    gpio_num, gpio_shift);
2017                 /* set FLOAT */
2018                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2019                 break;
2020
2021         default:
2022                 break;
2023         }
2024
2025         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2026         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2027
2028         return 0;
2029 }
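/*
 * Example (illustrative only - the pin number is arbitrary): drive
 * GPIO 1 of this port high, then float it again:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 */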
2030
2031 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2032 {
2033         /* The GPIO should be swapped if swap register is set and active */
2034         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2035                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2036         int gpio_shift = gpio_num +
2037                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2038         u32 gpio_mask = (1 << gpio_shift);
2039         u32 gpio_reg;
2040
2041         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2042                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2043                 return -EINVAL;
2044         }
2045
2046         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2047         /* read GPIO int */
2048         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2049
2050         switch (mode) {
2051         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2052                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2053                                    "output low\n", gpio_num, gpio_shift);
2054                 /* clear SET and set CLR */
2055                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2056                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2057                 break;
2058
2059         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2060                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2061                                    "output high\n", gpio_num, gpio_shift);
2062                 /* clear CLR and set SET */
2063                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2064                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065                 break;
2066
2067         default:
2068                 break;
2069         }
2070
2071         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2072         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2073
2074         return 0;
2075 }
2076
2077 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2078 {
2079         u32 spio_mask = (1 << spio_num);
2080         u32 spio_reg;
2081
2082         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2083             (spio_num > MISC_REGISTERS_SPIO_7)) {
2084                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2085                 return -EINVAL;
2086         }
2087
2088         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2089         /* read SPIO and mask out all but the float bits */
2090         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2091
2092         switch (mode) {
2093         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2094                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2095                 /* clear FLOAT and set CLR */
2096                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2097                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2098                 break;
2099
2100         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2101                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2102                 /* clear FLOAT and set SET */
2103                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2104                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2105                 break;
2106
2107         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2108                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2109                 /* set FLOAT */
2110                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2111                 break;
2112
2113         default:
2114                 break;
2115         }
2116
2117         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2118         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2119
2120         return 0;
2121 }
2122
2123 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2124 {
2125         switch (bp->link_vars.ieee_fc &
2126                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2127         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2128                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2129                                           ADVERTISED_Pause);
2130                 break;
2131
2132         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2133                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2134                                          ADVERTISED_Pause);
2135                 break;
2136
2137         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2138                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2139                 break;
2140
2141         default:
2142                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2143                                           ADVERTISED_Pause);
2144                 break;
2145         }
2146 }
2147
2148 static void bnx2x_link_report(struct bnx2x *bp)
2149 {
2150         if (bp->flags & MF_FUNC_DIS) {
2151                 netif_carrier_off(bp->dev);
2152                 netdev_err(bp->dev, "NIC Link is Down\n");
2153                 return;
2154         }
2155
2156         if (bp->link_vars.link_up) {
2157                 u16 line_speed;
2158
2159                 if (bp->state == BNX2X_STATE_OPEN)
2160                         netif_carrier_on(bp->dev);
2161                 netdev_info(bp->dev, "NIC Link is Up, ");
2162
2163                 line_speed = bp->link_vars.line_speed;
2164                 if (IS_E1HMF(bp)) {
2165                         u16 vn_max_rate;
2166
2167                         vn_max_rate =
2168                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2169                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2170                         if (vn_max_rate < line_speed)
2171                                 line_speed = vn_max_rate;
2172                 }
2173                 pr_cont("%d Mbps ", line_speed);
2174
2175                 if (bp->link_vars.duplex == DUPLEX_FULL)
2176                         pr_cont("full duplex");
2177                 else
2178                         pr_cont("half duplex");
2179
2180                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2181                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2182                                 pr_cont(", receive ");
2183                                 if (bp->link_vars.flow_ctrl &
2184                                     BNX2X_FLOW_CTRL_TX)
2185                                         pr_cont("& transmit ");
2186                         } else {
2187                                 pr_cont(", transmit ");
2188                         }
2189                         pr_cont("flow control ON");
2190                 }
2191                 pr_cont("\n");
2192
2193         } else { /* link_down */
2194                 netif_carrier_off(bp->dev);
2195                 netdev_err(bp->dev, "NIC Link is Down\n");
2196         }
2197 }
2198
2199 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2200 {
2201         if (!BP_NOMCP(bp)) {
2202                 u8 rc;
2203
2204                 /* Initialize link parameters structure variables */
2205                 /* It is recommended to turn off RX FC for jumbo frames
2206                    for better performance */
2207                 if (bp->dev->mtu > 5000)
2208                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2209                 else
2210                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2211
2212                 bnx2x_acquire_phy_lock(bp);
2213
2214                 if (load_mode == LOAD_DIAG)
2215                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2216
2217                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2218
2219                 bnx2x_release_phy_lock(bp);
2220
2221                 bnx2x_calc_fc_adv(bp);
2222
2223                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2224                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2225                         bnx2x_link_report(bp);
2226                 }
2227
2228                 return rc;
2229         }
2230         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2231         return -EINVAL;
2232 }
2233
2234 static void bnx2x_link_set(struct bnx2x *bp)
2235 {
2236         if (!BP_NOMCP(bp)) {
2237                 bnx2x_acquire_phy_lock(bp);
2238                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2239                 bnx2x_release_phy_lock(bp);
2240
2241                 bnx2x_calc_fc_adv(bp);
2242         } else
2243                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2244 }
2245
2246 static void bnx2x__link_reset(struct bnx2x *bp)
2247 {
2248         if (!BP_NOMCP(bp)) {
2249                 bnx2x_acquire_phy_lock(bp);
2250                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2251                 bnx2x_release_phy_lock(bp);
2252         } else
2253                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2254 }
2255
2256 static u8 bnx2x_link_test(struct bnx2x *bp)
2257 {
2258         u8 rc;
2259
2260         bnx2x_acquire_phy_lock(bp);
2261         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2262         bnx2x_release_phy_lock(bp);
2263
2264         return rc;
2265 }
2266
2267 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2268 {
2269         u32 r_param = bp->link_vars.line_speed / 8;
2270         u32 fair_periodic_timeout_usec;
2271         u32 t_fair;
2272
2273         memset(&(bp->cmng.rs_vars), 0,
2274                sizeof(struct rate_shaping_vars_per_port));
2275         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2276
2277         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2278         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2279
2280         /* this is the threshold below which no timer arming will occur;
2281            the 1.25 coefficient makes the threshold a little bigger
2282            than the real time, to compensate for timer inaccuracy */
2283         bp->cmng.rs_vars.rs_threshold =
2284                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2285
2286         /* resolution of fairness timer */
2287         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2288         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2289         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2290
2291         /* this is the threshold below which we won't arm the timer anymore */
2292         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2293
2294         /* we multiply by 1e3/8 to get bytes/msec.
2295            We don't want the credits to exceed a credit of
2296            t_fair*FAIR_MEM (the algorithm resolution) */
2297         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2298         /* since each tick is 4 usec */
2299         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2300 }
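/*
 * Worked example (a sketch, assuming RS_PERIODIC_TIMEOUT_USEC == 100 as
 * the SDM-ticks comment above implies): at 10G, line_speed = 10000, so
 * r_param = 1250 bytes/usec, rs_threshold = (100 * 1250 * 5) / 4 =
 * 156250 bytes, and t_fair = T_FAIR_COEF / 10000, i.e. the 1000 usec
 * fairness period mentioned above.
 */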
2301
2302 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2303    It's needed for further normalizing of the min_rates.
2304    The result is:
2305      the sum of vn_min_rates.
2306        or
2307      0 - if all the min_rates are 0.
2308      In the latter case the fairness algorithm should be deactivated.
2309      If not all min_rates are zero then those that are zero will be set to 1.
2310  */
2311 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2312 {
2313         int all_zero = 1;
2314         int port = BP_PORT(bp);
2315         int vn;
2316
2317         bp->vn_weight_sum = 0;
2318         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2319                 int func = 2*vn + port;
2320                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2321                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2322                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2323
2324                 /* Skip hidden vns */
2325                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2326                         continue;
2327
2328                 /* If min rate is zero - set it to 1 */
2329                 if (!vn_min_rate)
2330                         vn_min_rate = DEF_MIN_RATE;
2331                 else
2332                         all_zero = 0;
2333
2334                 bp->vn_weight_sum += vn_min_rate;
2335         }
2336
2337         /* ... only if all min rates are zeros - disable fairness */
2338         if (all_zero) {
2339                 bp->cmng.flags.cmng_enables &=
2340                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2341                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2342                    " fairness will be disabled\n");
2343         } else
2344                 bp->cmng.flags.cmng_enables |=
2345                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2346 }
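/*
 * Example (illustrative): with four VNs configured for min BW fields of
 * 10, 0, 30 and 0 (in units of 100 Mbps), the rates become 1000,
 * DEF_MIN_RATE, 3000 and DEF_MIN_RATE, so vn_weight_sum =
 * 4000 + 2 * DEF_MIN_RATE and fairness stays enabled; only an all-zero
 * configuration disables it.
 */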
2347
2348 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2349 {
2350         struct rate_shaping_vars_per_vn m_rs_vn;
2351         struct fairness_vars_per_vn m_fair_vn;
2352         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2353         u16 vn_min_rate, vn_max_rate;
2354         int i;
2355
2356         /* If function is hidden - set min and max to zeroes */
2357         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2358                 vn_min_rate = 0;
2359                 vn_max_rate = 0;
2360
2361         } else {
2362                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2363                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2364                 /* If min rate is zero - set it to 1 */
2365                 if (!vn_min_rate)
2366                         vn_min_rate = DEF_MIN_RATE;
2367                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2368                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2369         }
2370         DP(NETIF_MSG_IFUP,
2371            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2372            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2373
2374         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2375         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2376
2377         /* global vn counter - maximal Mbps for this vn */
2378         m_rs_vn.vn_counter.rate = vn_max_rate;
2379
2380         /* quota - number of bytes transmitted in this period */
2381         m_rs_vn.vn_counter.quota =
2382                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2383
2384         if (bp->vn_weight_sum) {
2385                 /* credit for each period of the fairness algorithm:
2386                    number of bytes in T_FAIR (the VNs share the port rate).
2387                    vn_weight_sum should not be larger than 10000, thus
2388                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2389                    than zero */
2390                 m_fair_vn.vn_credit_delta =
2391                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2392                                                  (8 * bp->vn_weight_sum))),
2393                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2394                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2395                    m_fair_vn.vn_credit_delta);
2396         }
2397
2398         /* Store it to internal memory */
2399         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2400                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2401                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2402                        ((u32 *)(&m_rs_vn))[i]);
2403
2404         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2405                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2406                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2407                        ((u32 *)(&m_fair_vn))[i]);
2408 }
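/*
 * Worked example (a sketch, using the numbers from above): with
 * vn_max_rate = 5000 Mbps and RS_PERIODIC_TIMEOUT_USEC == 100, the
 * per-period quota is (5000 * 100) / 8 = 62500 bytes; the fairness
 * credit is then the larger of vn_min_rate * T_FAIR_COEF /
 * (8 * vn_weight_sum) and twice the fair_threshold.
 */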
2409
2410
2411 /* This function is called upon link interrupt */
2412 static void bnx2x_link_attn(struct bnx2x *bp)
2413 {
2414         /* Make sure that we are synced with the current statistics */
2415         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2416
2417         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2418
2419         if (bp->link_vars.link_up) {
2420
2421                 /* dropless flow control */
2422                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2423                         int port = BP_PORT(bp);
2424                         u32 pause_enabled = 0;
2425
2426                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2427                                 pause_enabled = 1;
2428
2429                         REG_WR(bp, BAR_USTRORM_INTMEM +
2430                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2431                                pause_enabled);
2432                 }
2433
2434                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2435                         struct host_port_stats *pstats;
2436
2437                         pstats = bnx2x_sp(bp, port_stats);
2438                         /* reset old bmac stats */
2439                         memset(&(pstats->mac_stx[0]), 0,
2440                                sizeof(struct mac_stx));
2441                 }
2442                 if (bp->state == BNX2X_STATE_OPEN)
2443                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2444         }
2445
2446         /* indicate link status */
2447         bnx2x_link_report(bp);
2448
2449         if (IS_E1HMF(bp)) {
2450                 int port = BP_PORT(bp);
2451                 int func;
2452                 int vn;
2453
2454                 /* Set the attention towards other drivers on the same port */
2455                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2456                         if (vn == BP_E1HVN(bp))
2457                                 continue;
2458
2459                         func = ((vn << 1) | port);
2460                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2461                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2462                 }
2463
2464                 if (bp->link_vars.link_up) {
2465                         int i;
2466
2467                         /* Init rate shaping and fairness contexts */
2468                         bnx2x_init_port_minmax(bp);
2469
2470                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2471                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2472
2473                         /* Store it to internal memory */
2474                         for (i = 0;
2475                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2476                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2477                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2478                                        ((u32 *)(&bp->cmng))[i]);
2479                 }
2480         }
2481 }
2482
2483 static void bnx2x__link_status_update(struct bnx2x *bp)
2484 {
2485         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2486                 return;
2487
2488         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2489
2490         if (bp->link_vars.link_up)
2491                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2492         else
2493                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2494
2495         bnx2x_calc_vn_weight_sum(bp);
2496
2497         /* indicate link status */
2498         bnx2x_link_report(bp);
2499 }
2500
2501 static void bnx2x_pmf_update(struct bnx2x *bp)
2502 {
2503         int port = BP_PORT(bp);
2504         u32 val;
2505
2506         bp->port.pmf = 1;
2507         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2508
2509         /* enable nig attention */
2510         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2511         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2512         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2513
2514         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2515 }
2516
2517 /* end of Link */
2518
2519 /* slow path */
2520
2521 /*
2522  * General service functions
2523  */
2524
2525 /* send the MCP a request, block until there is a reply */
2526 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2527 {
2528         int func = BP_FUNC(bp);
2529         u32 seq = ++bp->fw_seq;
2530         u32 rc = 0;
2531         u32 cnt = 1;
2532         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2533
2534         mutex_lock(&bp->fw_mb_mutex);
2535         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2536         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2537
2538         do {
2539                 /* let the FW do its magic ... */
2540                 msleep(delay);
2541
2542                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2543
2544                 /* Give the FW up to 5 seconds (500*10ms) */
2545         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2546
2547         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2548            cnt*delay, rc, seq);
2549
2550         /* is this a reply to our command? */
2551         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2552                 rc &= FW_MSG_CODE_MASK;
2553         else {
2554                 /* FW BUG! */
2555                 BNX2X_ERR("FW failed to respond!\n");
2556                 bnx2x_fw_dump(bp);
2557                 rc = 0;
2558         }
2559         mutex_unlock(&bp->fw_mb_mutex);
2560
2561         return rc;
2562 }
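/*
 * A minimal usage sketch (illustrative; callers pass a DRV_MSG_CODE_*
 * command and get back the masked FW_MSG_CODE_* reply, or 0 if the FW
 * never echoed our sequence number):
 *
 *	u32 reply = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (!reply)
 *		... the FW did not respond - recovery is up to the caller ...
 */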
2563
2564 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2565 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2566 static void bnx2x_set_rx_mode(struct net_device *dev);
2567
2568 static void bnx2x_e1h_disable(struct bnx2x *bp)
2569 {
2570         int port = BP_PORT(bp);
2571
2572         netif_tx_disable(bp->dev);
2573
2574         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2575
2576         netif_carrier_off(bp->dev);
2577 }
2578
2579 static void bnx2x_e1h_enable(struct bnx2x *bp)
2580 {
2581         int port = BP_PORT(bp);
2582
2583         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2584
2585         /* Tx queues should only be re-enabled */
2586         netif_tx_wake_all_queues(bp->dev);
2587
2588         /*
2589          * Do not call netif_carrier_on() here since it will be called
2590          * from the link state check if the link is up
2591          */
2592 }
2593
2594 static void bnx2x_update_min_max(struct bnx2x *bp)
2595 {
2596         int port = BP_PORT(bp);
2597         int vn, i;
2598
2599         /* Init rate shaping and fairness contexts */
2600         bnx2x_init_port_minmax(bp);
2601
2602         bnx2x_calc_vn_weight_sum(bp);
2603
2604         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2605                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2606
2607         if (bp->port.pmf) {
2608                 int func;
2609
2610                 /* Set the attention towards other drivers on the same port */
2611                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2612                         if (vn == BP_E1HVN(bp))
2613                                 continue;
2614
2615                         func = ((vn << 1) | port);
2616                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2617                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2618                 }
2619
2620                 /* Store it to internal memory */
2621                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2622                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2623                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2624                                ((u32 *)(&bp->cmng))[i]);
2625         }
2626 }
2627
2628 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2629 {
2630         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2631
2632         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2633
2634                 /*
2635                  * This is the only place besides the function initialization
2636                  * where the bp->flags can change so it is done without any
2637                  * locks
2638                  */
2639                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2640                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2641                         bp->flags |= MF_FUNC_DIS;
2642
2643                         bnx2x_e1h_disable(bp);
2644                 } else {
2645                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2646                         bp->flags &= ~MF_FUNC_DIS;
2647
2648                         bnx2x_e1h_enable(bp);
2649                 }
2650                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2651         }
2652         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2653
2654                 bnx2x_update_min_max(bp);
2655                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2656         }
2657
2658         /* Report results to MCP */
2659         if (dcc_event)
2660                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2661         else
2662                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2663 }
2664
2665 /* must be called under the spq lock */
2666 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2667 {
2668         struct eth_spe *next_spe = bp->spq_prod_bd;
2669
2670         if (bp->spq_prod_bd == bp->spq_last_bd) {
2671                 bp->spq_prod_bd = bp->spq;
2672                 bp->spq_prod_idx = 0;
2673                 DP(NETIF_MSG_TIMER, "end of spq\n");
2674         } else {
2675                 bp->spq_prod_bd++;
2676                 bp->spq_prod_idx++;
2677         }
2678         return next_spe;
2679 }
2680
2681 /* must be called under the spq lock */
2682 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2683 {
2684         int func = BP_FUNC(bp);
2685
2686         /* Make sure that BD data is updated before writing the producer */
2687         wmb();
2688
2689         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2690                bp->spq_prod_idx);
2691         mmiowb();
2692 }
2693
2694 /* the slow path queue is odd since completions arrive on the fastpath ring */
2695 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2696                          u32 data_hi, u32 data_lo, int common)
2697 {
2698         struct eth_spe *spe;
2699
2700         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2701            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2702            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2703            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2704            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2705
2706 #ifdef BNX2X_STOP_ON_ERROR
2707         if (unlikely(bp->panic))
2708                 return -EIO;
2709 #endif
2710
2711         spin_lock_bh(&bp->spq_lock);
2712
2713         if (!bp->spq_left) {
2714                 BNX2X_ERR("BUG! SPQ ring full!\n");
2715                 spin_unlock_bh(&bp->spq_lock);
2716                 bnx2x_panic();
2717                 return -EBUSY;
2718         }
2719
2720         spe = bnx2x_sp_get_next(bp);
2721
2722         /* CID needs the port number to be encoded in it */
2723         spe->hdr.conn_and_cmd_data =
2724                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2725                                      HW_CID(bp, cid)));
2726         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2727         if (common)
2728                 spe->hdr.type |=
2729                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2730
2731         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2732         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2733
2734         bp->spq_left--;
2735
2736         bnx2x_sp_prod_update(bp);
2737         spin_unlock_bh(&bp->spq_lock);
2738         return 0;
2739 }
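/*
 * Usage sketch (illustrative only - the command and cid are arbitrary
 * here): post a "common" ramrod on the slow path queue and wait for its
 * completion to show up on the fastpath ring, e.g.
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, cid, 0, 0, 1);
 */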
2740
2741 /* acquire split MCP access lock register */
2742 static int bnx2x_acquire_alr(struct bnx2x *bp)
2743 {
2744         u32 i, j, val;
2745         int rc = 0;
2746
2747         might_sleep();
2748         i = 100;
2749         for (j = 0; j < i*10; j++) {
2750                 val = (1UL << 31);
2751                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2752                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2753                 if (val & (1L << 31))
2754                         break;
2755
2756                 msleep(5);
2757         }
2758         if (!(val & (1L << 31))) {
2759                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2760                 rc = -EBUSY;
2761         }
2762
2763         return rc;
2764 }
2765
2766 /* release split MCP access lock register */
2767 static void bnx2x_release_alr(struct bnx2x *bp)
2768 {
2769         u32 val = 0;
2770
2771         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2772 }
2773
2774 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2775 {
2776         struct host_def_status_block *def_sb = bp->def_status_blk;
2777         u16 rc = 0;
2778
2779         barrier(); /* status block is written to by the chip */
2780         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2781                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2782                 rc |= 1;
2783         }
2784         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2785                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2786                 rc |= 2;
2787         }
2788         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2789                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2790                 rc |= 4;
2791         }
2792         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2793                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2794                 rc |= 8;
2795         }
2796         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2797                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2798                 rc |= 16;
2799         }
2800         return rc;
2801 }
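/*
 * The return value above is a bitmask of which default status block
 * indices changed: 1 = attention bits, 2 = CSTORM, 4 = USTORM,
 * 8 = XSTORM, 16 = TSTORM. A non-zero value tells the caller there is
 * slowpath work to do.
 */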
2802
2803 /*
2804  * slow path service functions
2805  */
2806
2807 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2808 {
2809         int port = BP_PORT(bp);
2810         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2811                        COMMAND_REG_ATTN_BITS_SET);
2812         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2813                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2814         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2815                                        NIG_REG_MASK_INTERRUPT_PORT0;
2816         u32 aeu_mask;
2817         u32 nig_mask = 0;
2818
2819         if (bp->attn_state & asserted)
2820                 BNX2X_ERR("IGU ERROR\n");
2821
2822         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2823         aeu_mask = REG_RD(bp, aeu_addr);
2824
2825         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2826            aeu_mask, asserted);
2827         aeu_mask &= ~(asserted & 0xff);
2828         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2829
2830         REG_WR(bp, aeu_addr, aeu_mask);
2831         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2832
2833         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2834         bp->attn_state |= asserted;
2835         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2836
2837         if (asserted & ATTN_HARD_WIRED_MASK) {
2838                 if (asserted & ATTN_NIG_FOR_FUNC) {
2839
2840                         bnx2x_acquire_phy_lock(bp);
2841
2842                         /* save nig interrupt mask */
2843                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2844                         REG_WR(bp, nig_int_mask_addr, 0);
2845
2846                         bnx2x_link_attn(bp);
2847
2848                         /* handle unicore attn? */
2849                 }
2850                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2851                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2852
2853                 if (asserted & GPIO_2_FUNC)
2854                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2855
2856                 if (asserted & GPIO_3_FUNC)
2857                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2858
2859                 if (asserted & GPIO_4_FUNC)
2860                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2861
2862                 if (port == 0) {
2863                         if (asserted & ATTN_GENERAL_ATTN_1) {
2864                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2865                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2866                         }
2867                         if (asserted & ATTN_GENERAL_ATTN_2) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_3) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2874                         }
2875                 } else {
2876                         if (asserted & ATTN_GENERAL_ATTN_4) {
2877                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2878                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2879                         }
2880                         if (asserted & ATTN_GENERAL_ATTN_5) {
2881                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2882                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2883                         }
2884                         if (asserted & ATTN_GENERAL_ATTN_6) {
2885                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2886                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2887                         }
2888                 }
2889
2890         } /* if hardwired */
2891
2892         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2893            asserted, hc_addr);
2894         REG_WR(bp, hc_addr, asserted);
2895
2896         /* now set back the mask */
2897         if (asserted & ATTN_NIG_FOR_FUNC) {
2898                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2899                 bnx2x_release_phy_lock(bp);
2900         }
2901 }
2902
2903 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2904 {
2905         int port = BP_PORT(bp);
2906
2907         /* mark the failure */
2908         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2909         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2910         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2911                  bp->link_params.ext_phy_config);
2912
2913         /* log the failure */
2914         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
2915                    "Please contact Dell Support for assistance.\n");
2916 }
2917
2918 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2919 {
2920         int port = BP_PORT(bp);
2921         int reg_offset;
2922         u32 val, swap_val, swap_override;
2923
2924         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2925                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2926
2927         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2928
2929                 val = REG_RD(bp, reg_offset);
2930                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2931                 REG_WR(bp, reg_offset, val);
2932
2933                 BNX2X_ERR("SPIO5 hw attention\n");
2934
2935                 /* Fan failure attention */
2936                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2937                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2938                         /* Low power mode is controlled by GPIO 2 */
2939                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2940                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2941                         /* The PHY reset is controlled by GPIO 1 */
2942                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944                         break;
2945
2946                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2947                         /* The PHY reset is controlled by GPIO 1 */
2948                         /* fake the port number to cancel the swap done in
2949                            set_gpio() */
2950                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2951                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2952                         port = (swap_val && swap_override) ^ 1;
2953                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2954                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2955                         break;
2956
2957                 default:
2958                         break;
2959                 }
2960                 bnx2x_fan_failure(bp);
2961         }
2962
2963         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2964                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2965                 bnx2x_acquire_phy_lock(bp);
2966                 bnx2x_handle_module_detect_int(&bp->link_params);
2967                 bnx2x_release_phy_lock(bp);
2968         }
2969
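             /* HW_INTERRUT (sic) is the macro's actual spelling elsewhere
                in the driver, so it is used unchanged here */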
2970         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2971
2972                 val = REG_RD(bp, reg_offset);
2973                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2974                 REG_WR(bp, reg_offset, val);
2975
2976                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2977                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2978                 bnx2x_panic();
2979         }
2980 }
2981
2982 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2983 {
2984         u32 val;
2985
2986         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2987
2988                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2989                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2990                 /* DORQ discard attention */
2991                 if (val & 0x2)
2992                         BNX2X_ERR("FATAL error from DORQ\n");
2993         }
2994
2995         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2996
2997                 int port = BP_PORT(bp);
2998                 int reg_offset;
2999
3000                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3001                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3002
3003                 val = REG_RD(bp, reg_offset);
3004                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3005                 REG_WR(bp, reg_offset, val);
3006
3007                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3008                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3009                 bnx2x_panic();
3010         }
3011 }
3012
3013 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3014 {
3015         u32 val;
3016
3017         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3018
3019                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3020                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3021                 /* CFC error attention */
3022                 if (val & 0x2)
3023                         BNX2X_ERR("FATAL error from CFC\n");
3024         }
3025
3026         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3027
3028                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3029                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3030                 /* RQ_USDMDP_FIFO_OVERFLOW */
3031                 if (val & 0x18000)
3032                         BNX2X_ERR("FATAL error from PXP\n");
3033         }
3034
3035         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3036
3037                 int port = BP_PORT(bp);
3038                 int reg_offset;
3039
3040                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3041                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3042
3043                 val = REG_RD(bp, reg_offset);
3044                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3045                 REG_WR(bp, reg_offset, val);
3046
3047                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3048                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3049                 bnx2x_panic();
3050         }
3051 }
3052
3053 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3054 {
3055         u32 val;
3056
3057         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3058
3059                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3060                         int func = BP_FUNC(bp);
3061
3062                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3063                         bp->mf_config = SHMEM_RD(bp,
3064                                            mf_cfg.func_mf_config[func].config);
3065                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3066                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3067                                 bnx2x_dcc_event(bp,
3068                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3069                         bnx2x__link_status_update(bp);
3070                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3071                                 bnx2x_pmf_update(bp);
3072
3073                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3074
3075                         BNX2X_ERR("MC assert!\n");
3076                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3077                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3078                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3079                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3080                         bnx2x_panic();
3081
3082                 } else if (attn & BNX2X_MCP_ASSERT) {
3083
3084                         BNX2X_ERR("MCP assert!\n");
3085                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3086                         bnx2x_fw_dump(bp);
3087
3088                 } else {
3089                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
                }
3090         }
3091
3092         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3093                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3094                 if (attn & BNX2X_GRC_TIMEOUT) {
3095                         val = CHIP_IS_E1H(bp) ?
3096                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3097                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3098                 }
3099                 if (attn & BNX2X_GRC_RSV) {
3100                         val = CHIP_IS_E1H(bp) ?
3101                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3102                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3103                 }
3104                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3105         }
3106 }
3107
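     /* Dispatch a deassertion event: read the four after-invert attention
        registers, feed the bits of every affected attention group to the
        per-register handlers above, then clear the bits in the HC and
        unmask them again in the AEU */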
3108 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3109 {
3110         struct attn_route attn;
3111         struct attn_route group_mask;
3112         int port = BP_PORT(bp);
3113         int index;
3114         u32 reg_addr;
3115         u32 val;
3116         u32 aeu_mask;
3117
3118         /* need to take HW lock because MCP or other port might also
3119            try to handle this event */
3120         bnx2x_acquire_alr(bp);
3121
3122         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3123         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3124         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3125         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3126         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3127            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3128
3129         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3130                 if (deasserted & (1 << index)) {
3131                         group_mask = bp->attn_group[index];
3132
3133                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3134                            index, group_mask.sig[0], group_mask.sig[1],
3135                            group_mask.sig[2], group_mask.sig[3]);
3136
3137                         bnx2x_attn_int_deasserted3(bp,
3138                                         attn.sig[3] & group_mask.sig[3]);
3139                         bnx2x_attn_int_deasserted1(bp,
3140                                         attn.sig[1] & group_mask.sig[1]);
3141                         bnx2x_attn_int_deasserted2(bp,
3142                                         attn.sig[2] & group_mask.sig[2]);
3143                         bnx2x_attn_int_deasserted0(bp,
3144                                         attn.sig[0] & group_mask.sig[0]);
3145
3146                         if ((attn.sig[0] & group_mask.sig[0] &
3147                                                 HW_PRTY_ASSERT_SET_0) ||
3148                             (attn.sig[1] & group_mask.sig[1] &
3149                                                 HW_PRTY_ASSERT_SET_1) ||
3150                             (attn.sig[2] & group_mask.sig[2] &
3151                                                 HW_PRTY_ASSERT_SET_2))
3152                                 BNX2X_ERR("FATAL HW block parity attention\n");
3153                 }
3154         }
3155
3156         bnx2x_release_alr(bp);
3157
3158         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3159
3160         val = ~deasserted;
3161         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3162            val, reg_addr);
3163         REG_WR(bp, reg_addr, val);
3164
3165         if (~bp->attn_state & deasserted)
3166                 BNX2X_ERR("IGU ERROR\n");
3167
3168         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3169                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3170
3171         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3172         aeu_mask = REG_RD(bp, reg_addr);
3173
3174         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3175            aeu_mask, deasserted);
3176         aeu_mask |= (deasserted & 0xff);
3177         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3178
3179         REG_WR(bp, reg_addr, aeu_mask);
3180         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3181
3182         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3183         bp->attn_state &= ~deasserted;
3184         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3185 }
3186
3187 static void bnx2x_attn_int(struct bnx2x *bp)
3188 {
3189         /* read local copy of bits */
3190         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3191                                                                 attn_bits);
3192         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3193                                                                 attn_bits_ack);
3194         u32 attn_state = bp->attn_state;
3195
3196         /* look for changed bits */
3197         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3198         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
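             /* a bit is newly asserted if the chip reports it set while it
                is neither acknowledged nor tracked in attn_state; it is
                newly deasserted if the chip reports it clear while it is
                still both acknowledged and tracked */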
3199
3200         DP(NETIF_MSG_HW,
3201            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3202            attn_bits, attn_ack, asserted, deasserted);
3203
3204         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3205                 BNX2X_ERR("BAD attention state\n");
3206
3207         /* handle bits that were raised */
3208         if (asserted)
3209                 bnx2x_attn_int_asserted(bp, asserted);
3210
3211         if (deasserted)
3212                 bnx2x_attn_int_deasserted(bp, deasserted);
3213 }
3214
3215 static void bnx2x_sp_task(struct work_struct *work)
3216 {
3217         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3218         u16 status;
3219
3220
3221         /* Return here if interrupt is disabled */
3222         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3223                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3224                 return;
3225         }
3226
3227         status = bnx2x_update_dsb_idx(bp);
3228 /*      if (status == 0)                                     */
3229 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3230
3231         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3232
3233         /* HW attentions */
3234         if (status & 0x1)
3235                 bnx2x_attn_int(bp);
3236
3237         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3238                      IGU_INT_NOP, 1);
3239         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3240                      IGU_INT_NOP, 1);
3241         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3242                      IGU_INT_NOP, 1);
3243         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3244                      IGU_INT_NOP, 1);
3245         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3246                      IGU_INT_ENABLE, 1);
3247
3248 }
3249
3250 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3251 {
3252         struct net_device *dev = dev_instance;
3253         struct bnx2x *bp = netdev_priv(dev);
3254
3255         /* Return here if interrupt is disabled */
3256         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3257                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3258                 return IRQ_HANDLED;
3259         }
3260
3261         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3262
3263 #ifdef BNX2X_STOP_ON_ERROR
3264         if (unlikely(bp->panic))
3265                 return IRQ_HANDLED;
3266 #endif
3267
3268 #ifdef BCM_CNIC
3269         {
3270                 struct cnic_ops *c_ops;
3271
3272                 rcu_read_lock();
3273                 c_ops = rcu_dereference(bp->cnic_ops);
3274                 if (c_ops)
3275                         c_ops->cnic_handler(bp->cnic_data, NULL);
3276                 rcu_read_unlock();
3277         }
3278 #endif
3279         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3280
3281         return IRQ_HANDLED;
3282 }
3283
3284 /* end of slow path */
3285
3286 /* Statistics */
3287
3288 /****************************************************************************
3289 * Macros
3290 ****************************************************************************/
3291
3292 /* sum[hi:lo] += add[hi:lo] */
3293 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3294         do { \
3295                 s_lo += a_lo; \
3296                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3297         } while (0)
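     /* example: ADD_64(hi, 0, lo, 2) with hi:lo = 0x00000001:0xffffffff
        wraps lo to 0x00000001 and carries into hi, giving
        0x00000002:0x00000001 (0x1ffffffff + 2 = 0x200000001) */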
3298
3299 /* difference = minuend - subtrahend */
3300 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3301         do { \
3302                 if (m_lo < s_lo) { \
3303                         /* underflow */ \
3304                         if (m_hi > s_hi) { \
3305                                 /* we can 'loan' 1 */ \
3306                                 d_hi = m_hi - s_hi; \
3307                                 d_hi--; \
3308                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3309                         } else { \
3310                                 /* m_hi <= s_hi: clamp to zero */ \
3311                                 d_hi = 0; \
3312                                 d_lo = 0; \
3313                         } \
3314                 } else { \
3315                         /* m_lo >= s_lo */ \
3316                         if (m_hi < s_hi) { \
3317                                 d_hi = 0; \
3318                                 d_lo = 0; \
3319                         } else { \
3320                                 /* m_hi >= s_hi */ \
3321                                 d_hi = m_hi - s_hi; \
3322                                 d_lo = m_lo - s_lo; \
3323                         } \
3324                 } \
3325         } while (0)
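     /* DIFF_64 saturates at 0:0 instead of wrapping when the subtrahend
        exceeds the minuend, so a stale or reset counter cannot produce a
        huge bogus delta */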
3326
3327 #define UPDATE_STAT64(s, t) \
3328         do { \
3329                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3330                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3331                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3332                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3333                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3334                        pstats->mac_stx[1].t##_lo, diff.lo); \
3335         } while (0)
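     /* mac_stx[0] holds the previous raw MAC snapshot and mac_stx[1] the
        running 64-bit totals: UPDATE_STAT64 takes the delta against the
        snapshot, stores the new raw value and accumulates the delta */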
3336
3337 #define UPDATE_STAT64_NIG(s, t) \
3338         do { \
3339                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3340                         diff.lo, new->s##_lo, old->s##_lo); \
3341                 ADD_64(estats->t##_hi, diff.hi, \
3342                        estats->t##_lo, diff.lo); \
3343         } while (0)
3344
3345 /* sum[hi:lo] += add */
3346 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3347         do { \
3348                 s_lo += a; \
3349                 s_hi += (s_lo < a) ? 1 : 0; \
3350         } while (0)
3351
3352 #define UPDATE_EXTEND_STAT(s) \
3353         do { \
3354                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3355                               pstats->mac_stx[1].s##_lo, \
3356                               new->s); \
3357         } while (0)
3358
3359 #define UPDATE_EXTEND_TSTAT(s, t) \
3360         do { \
3361                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3362                 old_tclient->s = tclient->s; \
3363                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3364         } while (0)
3365
3366 #define UPDATE_EXTEND_USTAT(s, t) \
3367         do { \
3368                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3369                 old_uclient->s = uclient->s; \
3370                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3371         } while (0)
3372
3373 #define UPDATE_EXTEND_XSTAT(s, t) \
3374         do { \
3375                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3376                 old_xclient->s = xclient->s; \
3377                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3378         } while (0)
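     /* the UPDATE_EXTEND_{T,U,X}STAT variants all do the same thing for
        the three storm copies: extend a 32-bit firmware counter into the
        64-bit per-queue statistic by accumulating the wrap-safe u32 delta
        since the last snapshot */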
3379
3380 /* minuend -= subtrahend */
3381 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3382         do { \
3383                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3384         } while (0)
3385
3386 /* minuend[hi:lo] -= subtrahend */
3387 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3388         do { \
3389                 SUB_64(m_hi, 0, m_lo, s); \
3390         } while (0)
3391
3392 #define SUB_EXTEND_USTAT(s, t) \
3393         do { \
3394                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3395                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3396         } while (0)
3397
3398 /*
3399  * General service functions
3400  */
3401
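     /* on 32-bit builds a long cannot hold the full 64-bit counter, so
        only the low 32 bits are returned */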
3402 static inline long bnx2x_hilo(u32 *hiref)
3403 {
3404         u32 lo = *(hiref + 1);
3405 #if (BITS_PER_LONG == 64)
3406         u32 hi = *hiref;
3407
3408         return HILO_U64(hi, lo);
3409 #else
3410         return lo;
3411 #endif
3412 }
3413
3414 /*
3415  * Init service functions
3416  */
3417
3418 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3419 {
3420         if (!bp->stats_pending) {
3421                 struct eth_query_ramrod_data ramrod_data = {0};
3422                 int i, rc;
3423
3424                 ramrod_data.drv_counter = bp->stats_counter++;
3425                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3426                 for_each_queue(bp, i)
3427                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3428
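                     /* the 8-byte ramrod_data is passed to bnx2x_sp_post
                        as two u32 halves, second word first */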
3429                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3430                                    ((u32 *)&ramrod_data)[1],
3431                                    ((u32 *)&ramrod_data)[0], 0);
3432                 if (rc == 0) {
3433                         /* the stats ramrod has its own slot on the spq */
3434                         bp->spq_left++;
3435                         bp->stats_pending = 1;
3436                 }
3437         }
3438 }
3439
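     /* Kick off the prepared DMAE commands: when executer commands were
        queued (bp->executer_idx != 0), a bootstrap "loader" command is
        posted that copies the first prepared command into DMAE command
        memory and starts it via its completion GO write; otherwise the
        single pre-built function-stats command is posted directly */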
3440 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3441 {
3442         struct dmae_command *dmae = &bp->stats_dmae;
3443         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3444
3445         *stats_comp = DMAE_COMP_VAL;
3446         if (CHIP_REV_IS_SLOW(bp))
3447                 return;
3448
3449         /* loader */
3450         if (bp->executer_idx) {
3451                 int loader_idx = PMF_DMAE_C(bp);
3452
3453                 memset(dmae, 0, sizeof(struct dmae_command));
3454
3455                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3456                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3457                                 DMAE_CMD_DST_RESET |
3458 #ifdef __BIG_ENDIAN
3459                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3460 #else
3461                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3462 #endif
3463                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3464                                                DMAE_CMD_PORT_0) |
3465                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3466                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3467                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3468                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3469                                      sizeof(struct dmae_command) *
3470                                      (loader_idx + 1)) >> 2;
3471                 dmae->dst_addr_hi = 0;
3472                 dmae->len = sizeof(struct dmae_command) >> 2;
3473                 if (CHIP_IS_E1(bp))
3474                         dmae->len--;
3475                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3476                 dmae->comp_addr_hi = 0;
3477                 dmae->comp_val = 1;
3478
3479                 *stats_comp = 0;
3480                 bnx2x_post_dmae(bp, dmae, loader_idx);
3481
3482         } else if (bp->func_stx) {
3483                 *stats_comp = 0;
3484                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3485         }
3486 }
3487
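     /* Poll (roughly 10 x msleep(1)) for the final DMAE in the chain to
        write DMAE_COMP_VAL into stats_comp; a timeout only logs an error
        and the return value is always 1 */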
3488 static int bnx2x_stats_comp(struct bnx2x *bp)
3489 {
3490         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3491         int cnt = 10;
3492
3493         might_sleep();
3494         while (*stats_comp != DMAE_COMP_VAL) {
3495                 if (!cnt) {
3496                         BNX2X_ERR("timed out waiting for stats to finish\n");
3497                         break;
3498                 }
3499                 cnt--;
3500                 msleep(1);
3501         }
3502         return 1;
3503 }
3504
3505 /*
3506  * Statistics service functions
3507  */
3508
3509 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3510 {
3511         struct dmae_command *dmae;
3512         u32 opcode;
3513         int loader_idx = PMF_DMAE_C(bp);
3514         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3515
3516         /* sanity */
3517         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3518                 BNX2X_ERR("BUG!\n");
3519                 return;
3520         }
3521
3522         bp->executer_idx = 0;
3523
3524         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3525                   DMAE_CMD_C_ENABLE |
3526                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3527 #ifdef __BIG_ENDIAN
3528                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3529 #else
3530                   DMAE_CMD_ENDIANITY_DW_SWAP |
3531 #endif
3532                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3533                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3534
3535         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3537         dmae->src_addr_lo = bp->port.port_stx >> 2;
3538         dmae->src_addr_hi = 0;
3539         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3540         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3541         dmae->len = DMAE_LEN32_RD_MAX;
3542         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3543         dmae->comp_addr_hi = 0;
3544         dmae->comp_val = 1;
3545
3546         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3547         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3548         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3549         dmae->src_addr_hi = 0;
3550         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3551                                    DMAE_LEN32_RD_MAX * 4);
3552         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3553                                    DMAE_LEN32_RD_MAX * 4);
3554         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3555         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3556         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3557         dmae->comp_val = DMAE_COMP_VAL;
3558
3559         *stats_comp = 0;
3560         bnx2x_hw_stats_post(bp);
3561         bnx2x_stats_comp(bp);
3562 }
3563
3564 static void bnx2x_port_stats_init(struct bnx2x *bp)
3565 {
3566         struct dmae_command *dmae;
3567         int port = BP_PORT(bp);
3568         int vn = BP_E1HVN(bp);
3569         u32 opcode;
3570         int loader_idx = PMF_DMAE_C(bp);
3571         u32 mac_addr;
3572         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3573
3574         /* sanity */
3575         if (!bp->link_vars.link_up || !bp->port.pmf) {
3576                 BNX2X_ERR("BUG!\n");
3577                 return;
3578         }
3579
3580         bp->executer_idx = 0;
3581
3582         /* MCP */
3583         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3584                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3585                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3586 #ifdef __BIG_ENDIAN
3587                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3588 #else
3589                   DMAE_CMD_ENDIANITY_DW_SWAP |
3590 #endif
3591                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3592                   (vn << DMAE_CMD_E1HVN_SHIFT));
3593
3594         if (bp->port.port_stx) {
3595
3596                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3597                 dmae->opcode = opcode;
3598                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3599                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3600                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3601                 dmae->dst_addr_hi = 0;
3602                 dmae->len = sizeof(struct host_port_stats) >> 2;
3603                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3604                 dmae->comp_addr_hi = 0;
3605                 dmae->comp_val = 1;
3606         }
3607
3608         if (bp->func_stx) {
3609
3610                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3611                 dmae->opcode = opcode;
3612                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3613                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3614                 dmae->dst_addr_lo = bp->func_stx >> 2;
3615                 dmae->dst_addr_hi = 0;
3616                 dmae->len = sizeof(struct host_func_stats) >> 2;
3617                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3618                 dmae->comp_addr_hi = 0;
3619                 dmae->comp_val = 1;
3620         }
3621
3622         /* MAC */
3623         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3624                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3625                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3626 #ifdef __BIG_ENDIAN
3627                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3628 #else
3629                   DMAE_CMD_ENDIANITY_DW_SWAP |
3630 #endif
3631                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3632                   (vn << DMAE_CMD_E1HVN_SHIFT));
3633
3634         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3635
3636                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3637                                    NIG_REG_INGRESS_BMAC0_MEM);
3638
3639                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3640                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3641                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3642                 dmae->opcode = opcode;
3643                 dmae->src_addr_lo = (mac_addr +
3644                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3645                 dmae->src_addr_hi = 0;
3646                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3647                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3648                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3649                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3650                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3651                 dmae->comp_addr_hi = 0;
3652                 dmae->comp_val = 1;
3653
3654                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3655                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3656                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3657                 dmae->opcode = opcode;
3658                 dmae->src_addr_lo = (mac_addr +
3659                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3660                 dmae->src_addr_hi = 0;
3661                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3662                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3663                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3664                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3665                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3666                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3667                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3668                 dmae->comp_addr_hi = 0;
3669                 dmae->comp_val = 1;
3670
3671         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3672
3673                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3674
3675                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3676                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3677                 dmae->opcode = opcode;
3678                 dmae->src_addr_lo = (mac_addr +
3679                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3680                 dmae->src_addr_hi = 0;
3681                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3682                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3683                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3684                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3685                 dmae->comp_addr_hi = 0;
3686                 dmae->comp_val = 1;
3687
3688                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3689                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3690                 dmae->opcode = opcode;
3691                 dmae->src_addr_lo = (mac_addr +
3692                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3693                 dmae->src_addr_hi = 0;
3694                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3695                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3696                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3697                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3698                 dmae->len = 1;
3699                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3700                 dmae->comp_addr_hi = 0;
3701                 dmae->comp_val = 1;
3702
3703                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3704                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3705                 dmae->opcode = opcode;
3706                 dmae->src_addr_lo = (mac_addr +
3707                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3708                 dmae->src_addr_hi = 0;
3709                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3710                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3711                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3712                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3713                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3714                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3715                 dmae->comp_addr_hi = 0;
3716                 dmae->comp_val = 1;
3717         }
3718
3719         /* NIG */
3720         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3721         dmae->opcode = opcode;
3722         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3723                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3724         dmae->src_addr_hi = 0;
3725         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3726         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3727         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3728         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3729         dmae->comp_addr_hi = 0;
3730         dmae->comp_val = 1;
3731
3732         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3733         dmae->opcode = opcode;
3734         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3735                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3736         dmae->src_addr_hi = 0;
3737         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3738                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3739         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3740                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3741         dmae->len = (2*sizeof(u32)) >> 2;
3742         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3743         dmae->comp_addr_hi = 0;
3744         dmae->comp_val = 1;
3745
3746         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3747         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3748                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3749                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3750 #ifdef __BIG_ENDIAN
3751                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3752 #else
3753                         DMAE_CMD_ENDIANITY_DW_SWAP |
3754 #endif
3755                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3756                         (vn << DMAE_CMD_E1HVN_SHIFT));
3757         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3758                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3759         dmae->src_addr_hi = 0;
3760         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3761                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3762         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3763                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3764         dmae->len = (2*sizeof(u32)) >> 2;
3765         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3766         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3767         dmae->comp_val = DMAE_COMP_VAL;
3768
3769         *stats_comp = 0;
3770 }
3771
3772 static void bnx2x_func_stats_init(struct bnx2x *bp)
3773 {
3774         struct dmae_command *dmae = &bp->stats_dmae;
3775         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3776
3777         /* sanity */
3778         if (!bp->func_stx) {
3779                 BNX2X_ERR("BUG!\n");
3780                 return;
3781         }
3782
3783         bp->executer_idx = 0;
3784         memset(dmae, 0, sizeof(struct dmae_command));
3785
3786         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3787                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3788                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3789 #ifdef __BIG_ENDIAN
3790                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3791 #else
3792                         DMAE_CMD_ENDIANITY_DW_SWAP |
3793 #endif
3794                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3795                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3796         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3797         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3798         dmae->dst_addr_lo = bp->func_stx >> 2;
3799         dmae->dst_addr_hi = 0;
3800         dmae->len = sizeof(struct host_func_stats) >> 2;
3801         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3802         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3803         dmae->comp_val = DMAE_COMP_VAL;
3804
3805         *stats_comp = 0;
3806 }
3807
3808 static void bnx2x_stats_start(struct bnx2x *bp)
3809 {
3810         if (bp->port.pmf)
3811                 bnx2x_port_stats_init(bp);
3812
3813         else if (bp->func_stx)
3814                 bnx2x_func_stats_init(bp);
3815
3816         bnx2x_hw_stats_post(bp);
3817         bnx2x_storm_stats_post(bp);
3818 }
3819
3820 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3821 {
3822         bnx2x_stats_comp(bp);
3823         bnx2x_stats_pmf_update(bp);
3824         bnx2x_stats_start(bp);
3825 }
3826
3827 static void bnx2x_stats_restart(struct bnx2x *bp)
3828 {
3829         bnx2x_stats_comp(bp);
3830         bnx2x_stats_start(bp);
3831 }
3832
3833 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3834 {
3835         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3836         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3837         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3838         struct {
3839                 u32 lo;
3840                 u32 hi;
3841         } diff;
3842
3843         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3844         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3845         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3846         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3847         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3848         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3849         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3850         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3851         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3852         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3853         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3854         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3855         UPDATE_STAT64(tx_stat_gt127,
3856                                 tx_stat_etherstatspkts65octetsto127octets);
3857         UPDATE_STAT64(tx_stat_gt255,
3858                                 tx_stat_etherstatspkts128octetsto255octets);
3859         UPDATE_STAT64(tx_stat_gt511,
3860                                 tx_stat_etherstatspkts256octetsto511octets);
3861         UPDATE_STAT64(tx_stat_gt1023,
3862                                 tx_stat_etherstatspkts512octetsto1023octets);
3863         UPDATE_STAT64(tx_stat_gt1518,
3864                                 tx_stat_etherstatspkts1024octetsto1522octets);
3865         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3866         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3867         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3868         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3869         UPDATE_STAT64(tx_stat_gterr,
3870                                 tx_stat_dot3statsinternalmactransmiterrors);
3871         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3872
3873         estats->pause_frames_received_hi =
3874                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3875         estats->pause_frames_received_lo =
3876                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3877
3878         estats->pause_frames_sent_hi =
3879                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3880         estats->pause_frames_sent_lo =
3881                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3882 }
3883
3884 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3885 {
3886         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3887         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3888         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3889
3890         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3891         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3892         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3893         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3894         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3895         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3896         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3897         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3898         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3899         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3900         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3901         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3902         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3903         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3904         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3905         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3906         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3907         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3908         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3909         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3910         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3911         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3912         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3913         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3914         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3915         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3916         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3917         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3918         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3919         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3920         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3921
3922         estats->pause_frames_received_hi =
3923                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3924         estats->pause_frames_received_lo =
3925                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3926         ADD_64(estats->pause_frames_received_hi,
3927                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3928                estats->pause_frames_received_lo,
3929                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3930
3931         estats->pause_frames_sent_hi =
3932                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3933         estats->pause_frames_sent_lo =
3934                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3935         ADD_64(estats->pause_frames_sent_hi,
3936                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3937                estats->pause_frames_sent_lo,
3938                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3939 }
3940
3941 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3942 {
3943         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3944         struct nig_stats *old = &(bp->port.old_nig_stats);
3945         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3946         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3947         struct {
3948                 u32 lo;
3949                 u32 hi;
3950         } diff;
3951         u32 nig_timer_max;
3952
3953         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3954                 bnx2x_bmac_stats_update(bp);
3955
3956         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3957                 bnx2x_emac_stats_update(bp);
3958
3959         else { /* unreached */
3960                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3961                 return -1;
3962         }
3963
3964         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3965                       new->brb_discard - old->brb_discard);
3966         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3967                       new->brb_truncate - old->brb_truncate);
3968
3969         UPDATE_STAT64_NIG(egress_mac_pkt0,
3970                                         etherstatspkts1024octetsto1522octets);
3971         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3972
3973         memcpy(old, new, sizeof(struct nig_stats));
3974
3975         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3976                sizeof(struct mac_stx));
3977         estats->brb_drop_hi = pstats->brb_drop_hi;
3978         estats->brb_drop_lo = pstats->brb_drop_lo;
3979
3980         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3981
3982         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3983         if (nig_timer_max != estats->nig_timer_max) {
3984                 estats->nig_timer_max = nig_timer_max;
3985                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3986         }
3987
3988         return 0;
3989 }
3990
3991 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3992 {
3993         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3994         struct tstorm_per_port_stats *tport =
3995                                         &stats->tstorm_common.port_statistics;
3996         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3997         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3998         int i;
3999
4000         memcpy(&(fstats->total_bytes_received_hi),
4001                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4002                sizeof(struct host_func_stats) - 2*sizeof(u32));
4003         estats->error_bytes_received_hi = 0;
4004         estats->error_bytes_received_lo = 0;
4005         estats->etherstatsoverrsizepkts_hi = 0;
4006         estats->etherstatsoverrsizepkts_lo = 0;
4007         estats->no_buff_discard_hi = 0;
4008         estats->no_buff_discard_lo = 0;
4009
4010         for_each_queue(bp, i) {
4011                 struct bnx2x_fastpath *fp = &bp->fp[i];
4012                 int cl_id = fp->cl_id;
4013                 struct tstorm_per_client_stats *tclient =
4014                                 &stats->tstorm_common.client_statistics[cl_id];
4015                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4016                 struct ustorm_per_client_stats *uclient =
4017                                 &stats->ustorm_common.client_statistics[cl_id];
4018                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4019                 struct xstorm_per_client_stats *xclient =
4020                                 &stats->xstorm_common.client_statistics[cl_id];
4021                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4022                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4023                 u32 diff;
4024
4025                 /* are storm stats valid? */
4026                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4027                                                         bp->stats_counter) {
4028                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4029                            "  xstorm counter (%d) != stats_counter (%d)\n",
4030                            i, le16_to_cpu(xclient->stats_counter), bp->stats_counter);
4031                         return -1;
4032                 }
4033                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4034                                                         bp->stats_counter) {
4035                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4036                            "  tstorm counter (%d) != stats_counter (%d)\n",
4037                            i, le16_to_cpu(tclient->stats_counter), bp->stats_counter);
4038                         return -2;
4039                 }
4040                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4041                                                         bp->stats_counter) {
4042                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4043                            "  ustorm counter (%d) != stats_counter (%d)\n",
4044                            i, le16_to_cpu(uclient->stats_counter), bp->stats_counter);
4045                         return -4;
4046                 }
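                     /* a mismatch above most likely means the STAT_QUERY
                        ramrod posted with this stats_counter has not
                        completed yet, so the storm copies are stale and
                        this round is skipped */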
4047
4048                 qstats->total_bytes_received_hi =
4049                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4050                 qstats->total_bytes_received_lo =
4051                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4052
4053                 ADD_64(qstats->total_bytes_received_hi,
4054                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4055                        qstats->total_bytes_received_lo,
4056                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4057
4058                 ADD_64(qstats->total_bytes_received_hi,
4059                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4060                        qstats->total_bytes_received_lo,
4061                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4062
4063                 qstats->valid_bytes_received_hi =
4064                                         qstats->total_bytes_received_hi;
4065                 qstats->valid_bytes_received_lo =
4066                                         qstats->total_bytes_received_lo;
4067
4068                 qstats->error_bytes_received_hi =
4069                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4070                 qstats->error_bytes_received_lo =
4071                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4072
4073                 ADD_64(qstats->total_bytes_received_hi,
4074                        qstats->error_bytes_received_hi,
4075                        qstats->total_bytes_received_lo,
4076                        qstats->error_bytes_received_lo);
4077
4078                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4079                                         total_unicast_packets_received);
4080                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4081                                         total_multicast_packets_received);
4082                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4083                                         total_broadcast_packets_received);
4084                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4085                                         etherstatsoverrsizepkts);
4086                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4087
4088                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4089                                         total_unicast_packets_received);
4090                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4091                                         total_multicast_packets_received);
4092                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4093                                         total_broadcast_packets_received);
4094                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4095                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4096                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4097
4098                 qstats->total_bytes_transmitted_hi =
4099                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4100                 qstats->total_bytes_transmitted_lo =
4101                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4102
4103                 ADD_64(qstats->total_bytes_transmitted_hi,
4104                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4105                        qstats->total_bytes_transmitted_lo,
4106                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4107
4108                 ADD_64(qstats->total_bytes_transmitted_hi,
4109                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4110                        qstats->total_bytes_transmitted_lo,
4111                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4112
4113                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4114                                         total_unicast_packets_transmitted);
4115                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4116                                         total_multicast_packets_transmitted);
4117                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4118                                         total_broadcast_packets_transmitted);
4119
4120                 old_tclient->checksum_discard = tclient->checksum_discard;
4121                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4122
4123                 ADD_64(fstats->total_bytes_received_hi,
4124                        qstats->total_bytes_received_hi,
4125                        fstats->total_bytes_received_lo,
4126                        qstats->total_bytes_received_lo);
4127                 ADD_64(fstats->total_bytes_transmitted_hi,
4128                        qstats->total_bytes_transmitted_hi,
4129                        fstats->total_bytes_transmitted_lo,
4130                        qstats->total_bytes_transmitted_lo);
4131                 ADD_64(fstats->total_unicast_packets_received_hi,
4132                        qstats->total_unicast_packets_received_hi,
4133                        fstats->total_unicast_packets_received_lo,
4134                        qstats->total_unicast_packets_received_lo);
4135                 ADD_64(fstats->total_multicast_packets_received_hi,
4136                        qstats->total_multicast_packets_received_hi,
4137                        fstats->total_multicast_packets_received_lo,
4138                        qstats->total_multicast_packets_received_lo);
4139                 ADD_64(fstats->total_broadcast_packets_received_hi,
4140                        qstats->total_broadcast_packets_received_hi,
4141                        fstats->total_broadcast_packets_received_lo,
4142                        qstats->total_broadcast_packets_received_lo);
4143                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4144                        qstats->total_unicast_packets_transmitted_hi,
4145                        fstats->total_unicast_packets_transmitted_lo,
4146                        qstats->total_unicast_packets_transmitted_lo);
4147                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4148                        qstats->total_multicast_packets_transmitted_hi,
4149                        fstats->total_multicast_packets_transmitted_lo,
4150                        qstats->total_multicast_packets_transmitted_lo);
4151                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4152                        qstats->total_broadcast_packets_transmitted_hi,
4153                        fstats->total_broadcast_packets_transmitted_lo,
4154                        qstats->total_broadcast_packets_transmitted_lo);
4155                 ADD_64(fstats->valid_bytes_received_hi,
4156                        qstats->valid_bytes_received_hi,
4157                        fstats->valid_bytes_received_lo,
4158                        qstats->valid_bytes_received_lo);
4159
4160                 ADD_64(estats->error_bytes_received_hi,
4161                        qstats->error_bytes_received_hi,
4162                        estats->error_bytes_received_lo,
4163                        qstats->error_bytes_received_lo);
4164                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4165                        qstats->etherstatsoverrsizepkts_hi,
4166                        estats->etherstatsoverrsizepkts_lo,
4167                        qstats->etherstatsoverrsizepkts_lo);
4168                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4169                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4170         }
4171
4172         ADD_64(fstats->total_bytes_received_hi,
4173                estats->rx_stat_ifhcinbadoctets_hi,
4174                fstats->total_bytes_received_lo,
4175                estats->rx_stat_ifhcinbadoctets_lo);
4176
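        /* Mirror the accumulated function counters into eth_stats; the
         * "- 2*sizeof(u32)" appears to skip host_func_stats' two u32
         * start/end markers.
         */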
4177         memcpy(estats, &(fstats->total_bytes_received_hi),
4178                sizeof(struct host_func_stats) - 2*sizeof(u32));
4179
4180         ADD_64(estats->etherstatsoverrsizepkts_hi,
4181                estats->rx_stat_dot3statsframestoolong_hi,
4182                estats->etherstatsoverrsizepkts_lo,
4183                estats->rx_stat_dot3statsframestoolong_lo);
4184         ADD_64(estats->error_bytes_received_hi,
4185                estats->rx_stat_ifhcinbadoctets_hi,
4186                estats->error_bytes_received_lo,
4187                estats->rx_stat_ifhcinbadoctets_lo);
4188
4189         if (bp->port.pmf) {
4190                 estats->mac_filter_discard =
4191                                 le32_to_cpu(tport->mac_filter_discard);
4192                 estats->xxoverflow_discard =
4193                                 le32_to_cpu(tport->xxoverflow_discard);
4194                 estats->brb_truncate_discard =
4195                                 le32_to_cpu(tport->brb_truncate_discard);
4196                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4197         }
4198
4199         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4200
4201         bp->stats_pending = 0;
4202
4203         return 0;
4204 }
4205
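/* Fold the driver's 64-bit {hi, lo} counters into the standard
 * struct net_device_stats reported to the stack; the rx/tx error
 * totals are composed from the individual MAC and discard counters.
 */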
4206 static void bnx2x_net_stats_update(struct bnx2x *bp)
4207 {
4208         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4209         struct net_device_stats *nstats = &bp->dev->stats;
4210         int i;
4211
4212         nstats->rx_packets =
4213                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4214                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4215                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4216
4217         nstats->tx_packets =
4218                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4219                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4220                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4221
4222         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4223
4224         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4225
4226         nstats->rx_dropped = estats->mac_discard;
4227         for_each_queue(bp, i)
4228                 nstats->rx_dropped +=
4229                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4230
4231         nstats->tx_dropped = 0;
4232
4233         nstats->multicast =
4234                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4235
4236         nstats->collisions =
4237                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4238
4239         nstats->rx_length_errors =
4240                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4241                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4242         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4243                                  bnx2x_hilo(&estats->brb_truncate_hi);
4244         nstats->rx_crc_errors =
4245                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4246         nstats->rx_frame_errors =
4247                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4248         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4249         nstats->rx_missed_errors = estats->xxoverflow_discard;
4250
4251         nstats->rx_errors = nstats->rx_length_errors +
4252                             nstats->rx_over_errors +
4253                             nstats->rx_crc_errors +
4254                             nstats->rx_frame_errors +
4255                             nstats->rx_fifo_errors +
4256                             nstats->rx_missed_errors;
4257
4258         nstats->tx_aborted_errors =
4259                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4260                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4261         nstats->tx_carrier_errors =
4262                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4263         nstats->tx_fifo_errors = 0;
4264         nstats->tx_heartbeat_errors = 0;
4265         nstats->tx_window_errors = 0;
4266
4267         nstats->tx_errors = nstats->tx_aborted_errors +
4268                             nstats->tx_carrier_errors +
4269             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4270 }
4271
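/* Sum the per-queue driver counters (Xoff events, Rx discards, skb
 * allocation and HW checksum failures) into the global eth_stats.
 */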
4272 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4273 {
4274         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4275         int i;
4276
4277         estats->driver_xoff = 0;
4278         estats->rx_err_discard_pkt = 0;
4279         estats->rx_skb_alloc_failed = 0;
4280         estats->hw_csum_err = 0;
4281         for_each_queue(bp, i) {
4282                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4283
4284                 estats->driver_xoff += qstats->driver_xoff;
4285                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4286                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4287                 estats->hw_csum_err += qstats->hw_csum_err;
4288         }
4289 }
4290
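/* Runs on STATS_EVENT_UPDATE once the DMAE snapshot has completed:
 * refresh HW (PMF only), storm, netdev and driver stats, optionally
 * dump a debug summary, then post the next collection.  Panics if
 * the storm firmware stops updating its counters.
 */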
4291 static void bnx2x_stats_update(struct bnx2x *bp)
4292 {
4293         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4294
4295         if (*stats_comp != DMAE_COMP_VAL)
4296                 return;
4297
4298         if (bp->port.pmf)
4299                 bnx2x_hw_stats_update(bp);
4300
4301         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4302                 BNX2X_ERR("storm stats not updated for 3 consecutive polls\n");
4303                 bnx2x_panic();
4304                 return;
4305         }
4306
4307         bnx2x_net_stats_update(bp);
4308         bnx2x_drv_stats_update(bp);
4309
4310         if (netif_msg_timer(bp)) {
4311                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4312                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4313                 struct tstorm_per_client_stats *old_tclient =
4314                                                         &bp->fp->old_tclient;
4315                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4316                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4317                 struct net_device_stats *nstats = &bp->dev->stats;
4318                 int i;
4319
4320                 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4321                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4322                                   "  tx pkt (%lx)\n",
4323                        bnx2x_tx_avail(fp0_tx),
4324                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4325                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4326                                   "  rx pkt (%lx)\n",
4327                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4328                              fp0_rx->rx_comp_cons),
4329                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4330                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4331                                   "brb truncate %u\n",
4332                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4333                        qstats->driver_xoff,
4334                        estats->brb_drop_lo, estats->brb_truncate_lo);
4335                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4336                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4337                         "mac_discard %u  mac_filter_discard %u  "
4338                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4339                         "ttl0_discard %u\n",
4340                        le32_to_cpu(old_tclient->checksum_discard),
4341                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4342                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4343                        estats->mac_discard, estats->mac_filter_discard,
4344                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4345                        le32_to_cpu(old_tclient->ttl0_discard));
4346
4347                 for_each_queue(bp, i) {
4348                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4349                                bnx2x_fp(bp, i, tx_pkt),
4350                                bnx2x_fp(bp, i, rx_pkt),
4351                                bnx2x_fp(bp, i, rx_calls));
4352                 }
4353         }
4354
4355         bnx2x_hw_stats_post(bp);
4356         bnx2x_storm_stats_post(bp);
4357 }
4358
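/* Queue the DMAE commands that write the final port and (when used)
 * function statistics blocks back to their shmem areas; completion
 * is signalled through stats_comp.
 */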
4359 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4360 {
4361         struct dmae_command *dmae;
4362         u32 opcode;
4363         int loader_idx = PMF_DMAE_C(bp);
4364         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4365
4366         bp->executer_idx = 0;
4367
4368         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4369                   DMAE_CMD_C_ENABLE |
4370                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4371 #ifdef __BIG_ENDIAN
4372                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4373 #else
4374                   DMAE_CMD_ENDIANITY_DW_SWAP |
4375 #endif
4376                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4377                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4378
4379         if (bp->port.port_stx) {
4380
4381                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4382                 if (bp->func_stx)
4383                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4384                 else
4385                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4386                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4387                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4388                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4389                 dmae->dst_addr_hi = 0;
4390                 dmae->len = sizeof(struct host_port_stats) >> 2;
4391                 if (bp->func_stx) {
4392                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4393                         dmae->comp_addr_hi = 0;
4394                         dmae->comp_val = 1;
4395                 } else {
4396                         dmae->comp_addr_lo =
4397                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4398                         dmae->comp_addr_hi =
4399                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4400                         dmae->comp_val = DMAE_COMP_VAL;
4401
4402                         *stats_comp = 0;
4403                 }
4404         }
4405
4406         if (bp->func_stx) {
4407
4408                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4409                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4410                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4411                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4412                 dmae->dst_addr_lo = bp->func_stx >> 2;
4413                 dmae->dst_addr_hi = 0;
4414                 dmae->len = sizeof(struct host_func_stats) >> 2;
4415                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4416                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4417                 dmae->comp_val = DMAE_COMP_VAL;
4418
4419                 *stats_comp = 0;
4420         }
4421 }
4422
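/* Statistics STOP handler: complete any pending DMAE, take one last
 * HW/storm snapshot and, if anything was updated, push the final
 * counters out (the PMF also stops the port statistics).
 */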
4423 static void bnx2x_stats_stop(struct bnx2x *bp)
4424 {
4425         int update = 0;
4426
4427         bnx2x_stats_comp(bp);
4428
4429         if (bp->port.pmf)
4430                 update = (bnx2x_hw_stats_update(bp) == 0);
4431
4432         update |= (bnx2x_storm_stats_update(bp) == 0);
4433
4434         if (update) {
4435                 bnx2x_net_stats_update(bp);
4436
4437                 if (bp->port.pmf)
4438                         bnx2x_port_stats_stop(bp);
4439
4440                 bnx2x_hw_stats_post(bp);
4441                 bnx2x_stats_comp(bp);
4442         }
4443 }
4444
4445 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4446 {
4447 }
4448
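/* Statistics state machine: indexed by [current state][event].  Each
 * entry gives the handler to run and the state to enter; e.g. a STOP
 * event while ENABLED runs bnx2x_stats_stop() and moves to DISABLED.
 */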
4449 static const struct {
4450         void (*action)(struct bnx2x *bp);
4451         enum bnx2x_stats_state next_state;
4452 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4453 /* state        event   */
4454 {
4455 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4456 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4457 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4458 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4459 },
4460 {
4461 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4462 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4463 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4464 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4465 }
4466 };
4467
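/* Single entry point for statistics events: dispatch through the
 * state machine above and commit the resulting state transition.
 */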
4468 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4469 {
4470         enum bnx2x_stats_state state = bp->stats_state;
4471
4472         bnx2x_stats_stm[state][event].action(bp);
4473         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4474
4475         /* Make sure the state has been "changed" */
4476         smp_wmb();
4477
4478         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4479                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4480                    state, event, bp->stats_state);
4481 }
4482
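/* PMF only: DMA the driver's base port-stats block out to the
 * port_stx area in shared memory and wait for completion.
 */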
4483 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4484 {
4485         struct dmae_command *dmae;
4486         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4487
4488         /* sanity */
4489         if (!bp->port.pmf || !bp->port.port_stx) {
4490                 BNX2X_ERR("BUG!\n");
4491                 return;
4492         }
4493
4494         bp->executer_idx = 0;
4495
4496         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4497         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4498                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4499                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4500 #ifdef __BIG_ENDIAN
4501                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4502 #else
4503                         DMAE_CMD_ENDIANITY_DW_SWAP |
4504 #endif
4505                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4506                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4507         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4508         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4509         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4510         dmae->dst_addr_hi = 0;
4511         dmae->len = sizeof(struct host_port_stats) >> 2;
4512         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4513         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4514         dmae->comp_val = DMAE_COMP_VAL;
4515
4516         *stats_comp = 0;
4517         bnx2x_hw_stats_post(bp);
4518         bnx2x_stats_comp(bp);
4519 }
4520
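/* PMF only: initialize the function-stats area of every vnic on this
 * port, temporarily borrowing bp->func_stx for each one.
 */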
4521 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4522 {
4523         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4524         int port = BP_PORT(bp);
4525         int func;
4526         u32 func_stx;
4527
4528         /* sanity */
4529         if (!bp->port.pmf || !bp->func_stx) {
4530                 BNX2X_ERR("BUG!\n");
4531                 return;
4532         }
4533
4534         /* save our func_stx */
4535         func_stx = bp->func_stx;
4536
4537         for (vn = VN_0; vn < vn_max; vn++) {
4538                 func = 2*vn + port;
4539
4540                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4541                 bnx2x_func_stats_init(bp);
4542                 bnx2x_hw_stats_post(bp);
4543                 bnx2x_stats_comp(bp);
4544         }
4545
4546         /* restore our func_stx */
4547         bp->func_stx = func_stx;
4548 }
4549
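/* Non-PMF start-up path: DMA the existing function-stats block in
 * from shared memory, presumably so new counters accumulate on top
 * of what an earlier driver instance left there.
 */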
4550 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4551 {
4552         struct dmae_command *dmae = &bp->stats_dmae;
4553         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4554
4555         /* sanity */
4556         if (!bp->func_stx) {
4557                 BNX2X_ERR("BUG!\n");
4558                 return;
4559         }
4560
4561         bp->executer_idx = 0;
4562         memset(dmae, 0, sizeof(struct dmae_command));
4563
4564         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4565                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4566                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4567 #ifdef __BIG_ENDIAN
4568                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4569 #else
4570                         DMAE_CMD_ENDIANITY_DW_SWAP |
4571 #endif
4572                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4573                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4574         dmae->src_addr_lo = bp->func_stx >> 2;
4575         dmae->src_addr_hi = 0;
4576         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4577         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4578         dmae->len = sizeof(struct host_func_stats) >> 2;
4579         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4580         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4581         dmae->comp_val = DMAE_COMP_VAL;
4582
4583         *stats_comp = 0;
4584         bnx2x_hw_stats_post(bp);
4585         bnx2x_stats_comp(bp);
4586 }
4587
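/* Full statistics reset: look up the shmem stats addresses, snapshot
 * the NIG baseline, zero all per-queue/netdev/eth stats and prime
 * the base blocks according to the PMF/function role.
 */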
4588 static void bnx2x_stats_init(struct bnx2x *bp)
4589 {
4590         int port = BP_PORT(bp);
4591         int func = BP_FUNC(bp);
4592         int i;
4593
4594         bp->stats_pending = 0;
4595         bp->executer_idx = 0;
4596         bp->stats_counter = 0;
4597
4598         /* port and func stats for management */
4599         if (!BP_NOMCP(bp)) {
4600                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4601                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4602
4603         } else {
4604                 bp->port.port_stx = 0;
4605                 bp->func_stx = 0;
4606         }
4607         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4608            bp->port.port_stx, bp->func_stx);
4609
4610         /* port stats */
4611         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4612         bp->port.old_nig_stats.brb_discard =
4613                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4614         bp->port.old_nig_stats.brb_truncate =
4615                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4616         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4617                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4618         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4619                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4620
4621         /* function stats */
4622         for_each_queue(bp, i) {
4623                 struct bnx2x_fastpath *fp = &bp->fp[i];
4624
4625                 memset(&fp->old_tclient, 0,
4626                        sizeof(struct tstorm_per_client_stats));
4627                 memset(&fp->old_uclient, 0,
4628                        sizeof(struct ustorm_per_client_stats));
4629                 memset(&fp->old_xclient, 0,
4630                        sizeof(struct xstorm_per_client_stats));
4631                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4632         }
4633
4634         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4635         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4636
4637         bp->stats_state = STATS_STATE_DISABLED;
4638
4639         if (bp->port.pmf) {
4640                 if (bp->port.port_stx)
4641                         bnx2x_port_stats_base_init(bp);
4642
4643                 if (bp->func_stx)
4644                         bnx2x_func_stats_base_init(bp);
4645
4646         } else if (bp->func_stx)
4647                 bnx2x_func_stats_base_update(bp);
4648 }
4649
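/* Periodic driver timer: feeds the MCP heartbeat (drv_pulse and
 * mcp_pulse must differ by at most 1) and triggers a statistics
 * update while the device is in the OPEN state.
 */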
4650 static void bnx2x_timer(unsigned long data)
4651 {
4652         struct bnx2x *bp = (struct bnx2x *) data;
4653
4654         if (!netif_running(bp->dev))
4655                 return;
4656
4657         if (atomic_read(&bp->intr_sem) != 0)
4658                 goto timer_restart;
4659
4660         if (poll) {
4661                 struct bnx2x_fastpath *fp = &bp->fp[0];
4662 
4663                 bnx2x_tx_int(fp);
4664                 /* bounded Rx poll; the budget return value is unused here */
4665                 bnx2x_rx_int(fp, 1000);
4666         }
4667
4668         if (!BP_NOMCP(bp)) {
4669                 int func = BP_FUNC(bp);
4670                 u32 drv_pulse;
4671                 u32 mcp_pulse;
4672
4673                 ++bp->fw_drv_pulse_wr_seq;
4674                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4675                 /* TBD - add SYSTEM_TIME */
4676                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4677                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4678
4679                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4680                              MCP_PULSE_SEQ_MASK);
4681                 /* The delta between driver pulse and mcp response
4682                  * should be 1 (before mcp response) or 0 (after mcp response)
4683                  */
4684                 if ((drv_pulse != mcp_pulse) &&
4685                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4686                         /* someone lost a heartbeat... */
4687                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4688                                   drv_pulse, mcp_pulse);
4689                 }
4690         }
4691
4692         if (bp->state == BNX2X_STATE_OPEN)
4693                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4694
4695 timer_restart:
4696         mod_timer(&bp->timer, jiffies + bp->current_interval);
4697 }
4698
4699 /* end of Statistics */
4700
4701 /* nic init */
4702
4703 /*
4704  * nic init service functions
4705  */
4706
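/* Clear both the USTORM and CSTORM halves of a per-queue status
 * block in CSTORM internal memory.
 */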
4707 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4708 {
4709         int port = BP_PORT(bp);
4710
4711         /* "CSTORM" */
4712         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4713                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4714                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4715         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4716                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4717                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4718 }
4719
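/* Program a host status block: write its DMA address for the USTORM
 * and CSTORM halves, start with every HC index disabled, then ACK
 * with interrupts enabled.
 */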
4720 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4721                           dma_addr_t mapping, int sb_id)
4722 {
4723         int port = BP_PORT(bp);
4724         int func = BP_FUNC(bp);
4725         int index;
4726         u64 section;
4727
4728         /* USTORM */
4729         section = ((u64)mapping) + offsetof(struct host_status_block,
4730                                             u_status_block);
4731         sb->u_status_block.status_block_id = sb_id;
4732
4733         REG_WR(bp, BAR_CSTRORM_INTMEM +
4734                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4735         REG_WR(bp, BAR_CSTRORM_INTMEM +
4736                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4737                U64_HI(section));
4738         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4739                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4740
4741         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4742                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4743                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4744
4745         /* CSTORM */
4746         section = ((u64)mapping) + offsetof(struct host_status_block,
4747                                             c_status_block);
4748         sb->c_status_block.status_block_id = sb_id;
4749
4750         REG_WR(bp, BAR_CSTRORM_INTMEM +
4751                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4752         REG_WR(bp, BAR_CSTRORM_INTMEM +
4753                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4754                U64_HI(section));
4755         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4756                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4757
4758         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4759                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4760                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4761
4762         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4763 }
4764
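/* Clear all four storm sections of the default status block. */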
4765 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4766 {
4767         int func = BP_FUNC(bp);
4768
4769         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4770                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4771                         sizeof(struct tstorm_def_status_block)/4);
4772         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4773                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4774                         sizeof(struct cstorm_def_status_block_u)/4);
4775         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4776                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4777                         sizeof(struct cstorm_def_status_block_c)/4);
4778         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4779                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4780                         sizeof(struct xstorm_def_status_block)/4);
4781 }
4782
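/* Set up the default status block: latch the AEU signal masks for
 * the attention groups, then program the host address and disable
 * the HC indices for each storm section (U/C/T/X).
 */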
4783 static void bnx2x_init_def_sb(struct bnx2x *bp,
4784                               struct host_def_status_block *def_sb,
4785                               dma_addr_t mapping, int sb_id)
4786 {
4787         int port = BP_PORT(bp);
4788         int func = BP_FUNC(bp);
4789         int index, val, reg_offset;
4790         u64 section;
4791
4792         /* ATTN */
4793         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4794                                             atten_status_block);
4795         def_sb->atten_status_block.status_block_id = sb_id;
4796
4797         bp->attn_state = 0;
4798
4799         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4800                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4801
4802         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4803                 bp->attn_group[index].sig[0] = REG_RD(bp,
4804                                                      reg_offset + 0x10*index);
4805                 bp->attn_group[index].sig[1] = REG_RD(bp,
4806                                                reg_offset + 0x4 + 0x10*index);
4807                 bp->attn_group[index].sig[2] = REG_RD(bp,
4808                                                reg_offset + 0x8 + 0x10*index);
4809                 bp->attn_group[index].sig[3] = REG_RD(bp,
4810                                                reg_offset + 0xc + 0x10*index);
4811         }
4812
4813         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4814                              HC_REG_ATTN_MSG0_ADDR_L);
4815
4816         REG_WR(bp, reg_offset, U64_LO(section));
4817         REG_WR(bp, reg_offset + 4, U64_HI(section));
4818
4819         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4820
4821         val = REG_RD(bp, reg_offset);
4822         val |= sb_id;
4823         REG_WR(bp, reg_offset, val);
4824
4825         /* USTORM */
4826         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4827                                             u_def_status_block);
4828         def_sb->u_def_status_block.status_block_id = sb_id;
4829
4830         REG_WR(bp, BAR_CSTRORM_INTMEM +
4831                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4832         REG_WR(bp, BAR_CSTRORM_INTMEM +
4833                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4834                U64_HI(section));
4835         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4836                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4837
4838         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4839                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4840                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4841
4842         /* CSTORM */
4843         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4844                                             c_def_status_block);
4845         def_sb->c_def_status_block.status_block_id = sb_id;
4846
4847         REG_WR(bp, BAR_CSTRORM_INTMEM +
4848                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4849         REG_WR(bp, BAR_CSTRORM_INTMEM +
4850                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4851                U64_HI(section));
4852         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4853                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4854
4855         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4856                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4857                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4858
4859         /* TSTORM */
4860         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4861                                             t_def_status_block);
4862         def_sb->t_def_status_block.status_block_id = sb_id;
4863
4864         REG_WR(bp, BAR_TSTRORM_INTMEM +
4865                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4866         REG_WR(bp, BAR_TSTRORM_INTMEM +
4867                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4868                U64_HI(section));
4869         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4870                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4871
4872         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4873                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4874                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4875
4876         /* XSTORM */
4877         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4878                                             x_def_status_block);
4879         def_sb->x_def_status_block.status_block_id = sb_id;
4880
4881         REG_WR(bp, BAR_XSTRORM_INTMEM +
4882                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4883         REG_WR(bp, BAR_XSTRORM_INTMEM +
4884                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4885                U64_HI(section));
4886         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4887                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4888
4889         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4890                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4891                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4892
4893         bp->stats_pending = 0;
4894         bp->set_mac_pending = 0;
4895
4896         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4897 }
4898
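/* Program interrupt coalescing for every queue: scale rx/tx ticks
 * into HC timeout units (4 * BNX2X_BTR granularity) and disable the
 * index entirely when the resulting timeout is zero.
 */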
4899 static void bnx2x_update_coalesce(struct bnx2x *bp)
4900 {
4901         int port = BP_PORT(bp);
4902         int i;
4903
4904         for_each_queue(bp, i) {
4905                 int sb_id = bp->fp[i].sb_id;
4906
4907                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4908                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4909                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4910                                                       U_SB_ETH_RX_CQ_INDEX),
4911                         bp->rx_ticks/(4 * BNX2X_BTR));
4912                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4913                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4914                                                        U_SB_ETH_RX_CQ_INDEX),
4915                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4916
4917                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4918                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4919                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4920                                                       C_SB_ETH_TX_CQ_INDEX),
4921                         bp->tx_ticks/(4 * BNX2X_BTR));
4922                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4923                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4924                                                        C_SB_ETH_TX_CQ_INDEX),
4925                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4926         }
4927 }
4928
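/* Free up to 'last' TPA bin skbs; bins still in BNX2X_TPA_START
 * state are PCI-unmapped first.
 */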
4929 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4930                                        struct bnx2x_fastpath *fp, int last)
4931 {
4932         int i;
4933
4934         for (i = 0; i < last; i++) {
4935                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4936                 struct sk_buff *skb = rx_buf->skb;
4937
4938                 if (skb == NULL) {
4939                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4940                         continue;
4941                 }
4942
4943                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4944                         pci_unmap_single(bp->pdev,
4945                                          pci_unmap_addr(rx_buf, mapping),
4946                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4947
4948                 dev_kfree_skb(skb);
4949                 rx_buf->skb = NULL;
4950         }
4951 }
4952
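/* Bring up all Rx rings: pre-allocate the TPA bins (when TPA is on),
 * chain the "next page" entries of the SGE/BD/CQ rings, fill them
 * with buffers and publish the initial producers to the chip.
 * (ETH_OVREHEAD is indeed spelled that way in bnx2x.h.)
 */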
4953 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4954 {
4955         int func = BP_FUNC(bp);
4956         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4957                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4958         u16 ring_prod, cqe_ring_prod;
4959         int i, j;
4960
4961         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4962         DP(NETIF_MSG_IFUP,
4963            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4964
4965         if (bp->flags & TPA_ENABLE_FLAG) {
4966
4967                 for_each_queue(bp, j) {
4968                         struct bnx2x_fastpath *fp = &bp->fp[j];
4969
4970                         for (i = 0; i < max_agg_queues; i++) {
4971                                 fp->tpa_pool[i].skb =
4972                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4973                                 if (!fp->tpa_pool[i].skb) {
4974                                         BNX2X_ERR("Failed to allocate TPA "
4975                                                   "skb pool for queue[%d] - "
4976                                                   "disabling TPA on this "
4977                                                   "queue!\n", j);
4978                                         bnx2x_free_tpa_pool(bp, fp, i);
4979                                         fp->disable_tpa = 1;
4980                                         break;
4981                                 }
4982                                 /* each queue owns its own tpa_pool entry */
4983                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4984                                                    mapping, 0);
4985                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4986                         }
4987                 }
4988         }
4989
4990         for_each_queue(bp, j) {
4991                 struct bnx2x_fastpath *fp = &bp->fp[j];
4992
4993                 fp->rx_bd_cons = 0;
4994                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4995                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4996
4997                 /* "next page" elements initialization */
4998                 /* SGE ring */
4999                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5000                         struct eth_rx_sge *sge;
5001
5002                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5003                         sge->addr_hi =
5004                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5005                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5006                         sge->addr_lo =
5007                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5008                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5009                 }
5010
5011                 bnx2x_init_sge_ring_bit_mask(fp);
5012
5013                 /* RX BD ring */
5014                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5015                         struct eth_rx_bd *rx_bd;
5016
5017                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5018                         rx_bd->addr_hi =
5019                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5020                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5021                         rx_bd->addr_lo =
5022                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5023                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5024                 }
5025
5026                 /* CQ ring */
5027                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5028                         struct eth_rx_cqe_next_page *nextpg;
5029
5030                         nextpg = (struct eth_rx_cqe_next_page *)
5031                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5032                         nextpg->addr_hi =
5033                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5034                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5035                         nextpg->addr_lo =
5036                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5037                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5038                 }
5039
5040                 /* Allocate SGEs and initialize the ring elements */
5041                 for (i = 0, ring_prod = 0;
5042                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5043
5044                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5045                                 BNX2X_ERR("was only able to allocate "
5046                                           "%d rx sges\n", i);
5047                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5048                                 /* Cleanup already allocated elements */
5049                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5050                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5051                                 fp->disable_tpa = 1;
5052                                 ring_prod = 0;
5053                                 break;
5054                         }
5055                         ring_prod = NEXT_SGE_IDX(ring_prod);
5056                 }
5057                 fp->rx_sge_prod = ring_prod;
5058
5059                 /* Allocate BDs and initialize BD ring */
5060                 fp->rx_comp_cons = 0;
5061                 cqe_ring_prod = ring_prod = 0;
5062                 for (i = 0; i < bp->rx_ring_size; i++) {
5063                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5064                                 BNX2X_ERR("was only able to allocate "
5065                                           "%d rx skbs on queue[%d]\n", i, j);
5066                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5067                                 break;
5068                         }
5069                         ring_prod = NEXT_RX_IDX(ring_prod);
5070                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5071                         WARN_ON(ring_prod <= i);
5072                 }
5073
5074                 fp->rx_bd_prod = ring_prod;
5075                 /* must not have more available CQEs than BDs */
5076                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5077                                        cqe_ring_prod);
5078                 fp->rx_pkt = fp->rx_calls = 0;
5079
5080                 /* Warning!
5081                  * This will generate an interrupt (to the TSTORM),
5082                  * so it must only be done after the chip is initialized.
5083                  */
5084                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5085                                      fp->rx_sge_prod);
5086                 if (j != 0)
5087                         continue;
5088
5089                 REG_WR(bp, BAR_USTRORM_INTMEM +
5090                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5091                        U64_LO(fp->rx_comp_mapping));
5092                 REG_WR(bp, BAR_USTRORM_INTMEM +
5093                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5094                        U64_HI(fp->rx_comp_mapping));
5095         }
5096 }
5097
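/* Chain the Tx BD pages together and reset each queue's producers,
 * consumers and doorbell data.
 */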
5098 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5099 {
5100         int i, j;
5101
5102         for_each_queue(bp, j) {
5103                 struct bnx2x_fastpath *fp = &bp->fp[j];
5104
5105                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5106                         struct eth_tx_next_bd *tx_next_bd =
5107                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5108
5109                         tx_next_bd->addr_hi =
5110                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5111                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5112                         tx_next_bd->addr_lo =
5113                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5114                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5115                 }
5116
5117                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5118                 fp->tx_db.data.zero_fill1 = 0;
5119                 fp->tx_db.data.prod = 0;
5120
5121                 fp->tx_pkt_prod = 0;
5122                 fp->tx_pkt_cons = 0;
5123                 fp->tx_bd_prod = 0;
5124                 fp->tx_bd_cons = 0;
5125                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5126                 fp->tx_pkt = 0;
5127         }
5128 }
5129
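/* Initialize the slowpath (SP) queue and tell XSTORM where its page
 * base and current producer live.
 */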
5130 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5131 {
5132         int func = BP_FUNC(bp);
5133
5134         spin_lock_init(&bp->spq_lock);
5135
5136         bp->spq_left = MAX_SPQ_PENDING;
5137         bp->spq_prod_idx = 0;
5138         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5139         bp->spq_prod_bd = bp->spq;
5140         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5141
5142         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5143                U64_LO(bp->spq_mapping));
5144         REG_WR(bp,
5145                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5146                U64_HI(bp->spq_mapping));
5147
5148         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5149                bp->spq_prod_idx);
5150 }
5151
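/* Fill each connection's ETH context: the USTORM Rx side (client id,
 * buffer sizes, BD/SGE page bases, optional TPA parameters) and the
 * CSTORM/XSTORM Tx side, plus the CDU reserved words.
 */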
5152 static void bnx2x_init_context(struct bnx2x *bp)
5153 {
5154         int i;
5155
5156         /* Rx */
5157         for_each_queue(bp, i) {
5158                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5159                 struct bnx2x_fastpath *fp = &bp->fp[i];
5160                 u8 cl_id = fp->cl_id;
5161
5162                 context->ustorm_st_context.common.sb_index_numbers =
5163                                                 BNX2X_RX_SB_INDEX_NUM;
5164                 context->ustorm_st_context.common.clientId = cl_id;
5165                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5166                 context->ustorm_st_context.common.flags =
5167                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5168                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5169                 context->ustorm_st_context.common.statistics_counter_id =
5170                                                 cl_id;
5171                 context->ustorm_st_context.common.mc_alignment_log_size =
5172                                                 BNX2X_RX_ALIGN_SHIFT;
5173                 context->ustorm_st_context.common.bd_buff_size =
5174                                                 bp->rx_buf_size;
5175                 context->ustorm_st_context.common.bd_page_base_hi =
5176                                                 U64_HI(fp->rx_desc_mapping);
5177                 context->ustorm_st_context.common.bd_page_base_lo =
5178                                                 U64_LO(fp->rx_desc_mapping);
5179                 if (!fp->disable_tpa) {
5180                         context->ustorm_st_context.common.flags |=
5181                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5182                         context->ustorm_st_context.common.sge_buff_size =
5183                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5184                                          (u32)0xffff);
5185                         context->ustorm_st_context.common.sge_page_base_hi =
5186                                                 U64_HI(fp->rx_sge_mapping);
5187                         context->ustorm_st_context.common.sge_page_base_lo =
5188                                                 U64_LO(fp->rx_sge_mapping);
5189
5190                         context->ustorm_st_context.common.max_sges_for_packet =
5191                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5192                         context->ustorm_st_context.common.max_sges_for_packet =
5193                                 ((context->ustorm_st_context.common.
5194                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5195                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5196                 }
5197
5198                 context->ustorm_ag_context.cdu_usage =
5199                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5200                                                CDU_REGION_NUMBER_UCM_AG,
5201                                                ETH_CONNECTION_TYPE);
5202
5203                 context->xstorm_ag_context.cdu_reserved =
5204                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5205                                                CDU_REGION_NUMBER_XCM_AG,
5206                                                ETH_CONNECTION_TYPE);
5207         }
5208
5209         /* Tx */
5210         for_each_queue(bp, i) {
5211                 struct bnx2x_fastpath *fp = &bp->fp[i];
5212                 struct eth_context *context =
5213                         bnx2x_sp(bp, context[i].eth);
5214
5215                 context->cstorm_st_context.sb_index_number =
5216                                                 C_SB_ETH_TX_CQ_INDEX;
5217                 context->cstorm_st_context.status_block_id = fp->sb_id;
5218
5219                 context->xstorm_st_context.tx_bd_page_base_hi =
5220                                                 U64_HI(fp->tx_desc_mapping);
5221                 context->xstorm_st_context.tx_bd_page_base_lo =
5222                                                 U64_LO(fp->tx_desc_mapping);
5223                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5224                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5225         }
5226 }
5227
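/* Program the RSS indirection table, spreading its entries
 * round-robin across the active queues' client ids.
 */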
5228 static void bnx2x_init_ind_table(struct bnx2x *bp)
5229 {
5230         int func = BP_FUNC(bp);
5231         int i;
5232
5233         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5234                 return;
5235
5236         DP(NETIF_MSG_IFUP,
5237            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5238         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5239                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5240                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5241                         bp->fp->cl_id + (i % bp->num_queues));
5242 }
5243
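/* Push the per-client Tstorm configuration (MTU, statistics counter
 * id, VLAN/E1HOV removal flags) for every queue.  The STATSITICS
 * spelling matches the flag's actual definition.
 */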
5244 static void bnx2x_set_client_config(struct bnx2x *bp)
5245 {
5246         struct tstorm_eth_client_config tstorm_client = {0};
5247         int port = BP_PORT(bp);
5248         int i;
5249
5250         tstorm_client.mtu = bp->dev->mtu;
5251         tstorm_client.config_flags =
5252                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5253                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5254 #ifdef BCM_VLAN
5255         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5256                 tstorm_client.config_flags |=
5257                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5258                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5259         }
5260 #endif
5261
5262         for_each_queue(bp, i) {
5263                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5264
5265                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5266                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5267                        ((u32 *)&tstorm_client)[0]);
5268                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5269                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5270                        ((u32 *)&tstorm_client)[1]);
5271         }
5272
5273         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5274            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5275 }
5276
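/* Translate the requested rx_mode into Tstorm drop/accept-all masks
 * and the NIG LLH BRB mask, then refresh the client config unless Rx
 * is disabled entirely.
 */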
5277 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5278 {
5279         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5280         int mode = bp->rx_mode;
5281         int mask = bp->rx_mode_cl_mask;
5282         int func = BP_FUNC(bp);
5283         int port = BP_PORT(bp);
5284         int i;
5285         /* All but management unicast packets should pass to the host as well */
5286         u32 llh_mask =
5287                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5288                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5289                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5290                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5291
5292         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5293
5294         switch (mode) {
5295         case BNX2X_RX_MODE_NONE: /* no Rx */
5296                 tstorm_mac_filter.ucast_drop_all = mask;
5297                 tstorm_mac_filter.mcast_drop_all = mask;
5298                 tstorm_mac_filter.bcast_drop_all = mask;
5299                 break;
5300
5301         case BNX2X_RX_MODE_NORMAL:
5302                 tstorm_mac_filter.bcast_accept_all = mask;
5303                 break;
5304
5305         case BNX2X_RX_MODE_ALLMULTI:
5306                 tstorm_mac_filter.mcast_accept_all = mask;
5307                 tstorm_mac_filter.bcast_accept_all = mask;
5308                 break;
5309
5310         case BNX2X_RX_MODE_PROMISC:
5311                 tstorm_mac_filter.ucast_accept_all = mask;
5312                 tstorm_mac_filter.mcast_accept_all = mask;
5313                 tstorm_mac_filter.bcast_accept_all = mask;
5314                 /* pass management unicast packets as well */
5315                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5316                 break;
5317
5318         default:
5319                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5320                 break;
5321         }
5322
5323         REG_WR(bp,
5324                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5325                llh_mask);
5326
5327         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5328                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5329                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5330                        ((u32 *)&tstorm_mac_filter)[i]);
5331
5332 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5333                    ((u32 *)&tstorm_mac_filter)[i]); */
5334         }
5335
5336         if (mode != BNX2X_RX_MODE_NONE)
5337                 bnx2x_set_client_config(bp);
5338 }
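/* Summary of the switch above (accept/drop masks per rx mode):
 *   NONE:     drop all ucast/mcast/bcast
 *   NORMAL:   accept bcast; ucast/mcast are left to the MAC/multicast
 *             filters configured elsewhere
 *   ALLMULTI: accept all mcast plus bcast
 *   PROMISC:  accept everything, incl. management ucast via the LLH mask
 */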
5339
5340 static void bnx2x_init_internal_common(struct bnx2x *bp)
5341 {
5342         int i;
5343
5344         /* Zero this manually as its initialization is
5345            currently missing in the initTool */
5346         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5347                 REG_WR(bp, BAR_USTRORM_INTMEM +
5348                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5349 }
5350
5351 static void bnx2x_init_internal_port(struct bnx2x *bp)
5352 {
5353         int port = BP_PORT(bp);
5354
5355         REG_WR(bp,
5356                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5357         REG_WR(bp,
5358                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5359         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5360         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5361 }
5362
5363 static void bnx2x_init_internal_func(struct bnx2x *bp)
5364 {
5365         struct tstorm_eth_function_common_config tstorm_config = {0};
5366         struct stats_indication_flags stats_flags = {0};
5367         int port = BP_PORT(bp);
5368         int func = BP_FUNC(bp);
5369         int i, j;
5370         u32 offset;
5371         u16 max_agg_size;
5372
5373         if (is_multi(bp)) {
5374                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5375                 tstorm_config.rss_result_mask = MULTI_MASK;
5376         }
5377
5378         /* Enable TPA if needed */
5379         if (bp->flags & TPA_ENABLE_FLAG)
5380                 tstorm_config.config_flags |=
5381                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5382
5383         if (IS_E1HMF(bp))
5384                 tstorm_config.config_flags |=
5385                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5386
5387         tstorm_config.leading_client_id = BP_L_ID(bp);
5388
5389         REG_WR(bp, BAR_TSTRORM_INTMEM +
5390                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5391                (*(u32 *)&tstorm_config));
5392
5393         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5394         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5395         bnx2x_set_storm_rx_mode(bp);
5396
5397         for_each_queue(bp, i) {
5398                 u8 cl_id = bp->fp[i].cl_id;
5399
5400                 /* reset xstorm per client statistics */
5401                 offset = BAR_XSTRORM_INTMEM +
5402                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5403                 for (j = 0;
5404                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5405                         REG_WR(bp, offset + j*4, 0);
5406
5407                 /* reset tstorm per client statistics */
5408                 offset = BAR_TSTRORM_INTMEM +
5409                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5410                 for (j = 0;
5411                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5412                         REG_WR(bp, offset + j*4, 0);
5413
5414                 /* reset ustorm per client statistics */
5415                 offset = BAR_USTRORM_INTMEM +
5416                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5417                 for (j = 0;
5418                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5419                         REG_WR(bp, offset + j*4, 0);
5420         }
5421
5422         /* Init statistics related context */
5423         stats_flags.collect_eth = 1;
5424
5425         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5426                ((u32 *)&stats_flags)[0]);
5427         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5428                ((u32 *)&stats_flags)[1]);
5429
5430         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5431                ((u32 *)&stats_flags)[0]);
5432         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5433                ((u32 *)&stats_flags)[1]);
5434
5435         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5436                ((u32 *)&stats_flags)[0]);
5437         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5438                ((u32 *)&stats_flags)[1]);
5439
5440         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5441                ((u32 *)&stats_flags)[0]);
5442         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5443                ((u32 *)&stats_flags)[1]);
5444
5445         REG_WR(bp, BAR_XSTRORM_INTMEM +
5446                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5447                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5448         REG_WR(bp, BAR_XSTRORM_INTMEM +
5449                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5450                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5451
5452         REG_WR(bp, BAR_TSTRORM_INTMEM +
5453                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5454                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5455         REG_WR(bp, BAR_TSTRORM_INTMEM +
5456                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5457                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5458
5459         REG_WR(bp, BAR_USTRORM_INTMEM +
5460                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5461                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5462         REG_WR(bp, BAR_USTRORM_INTMEM +
5463                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5464                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5465
5466         if (CHIP_IS_E1H(bp)) {
5467                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5468                         IS_E1HMF(bp));
5469                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5470                         IS_E1HMF(bp));
5471                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5472                         IS_E1HMF(bp));
5473                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5474                         IS_E1HMF(bp));
5475
5476                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5477                          bp->e1hov);
5478         }
5479
5480         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5481         max_agg_size =
5482                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5483                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5484                     (u32)0xffff);
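        /* e.g. (illustrative, assuming 4K SGE pages, PAGES_PER_SGE == 2
         * and the typical MAX_SKB_FRAGS >= 8):
         * min(8, MAX_SKB_FRAGS) * 4096 * 2 = 64K, which the u16 clamp
         * reduces to 0xffff */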
5485         for_each_queue(bp, i) {
5486                 struct bnx2x_fastpath *fp = &bp->fp[i];
5487
5488                 REG_WR(bp, BAR_USTRORM_INTMEM +
5489                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5490                        U64_LO(fp->rx_comp_mapping));
5491                 REG_WR(bp, BAR_USTRORM_INTMEM +
5492                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5493                        U64_HI(fp->rx_comp_mapping));
5494
5495                 /* Next page */
5496                 REG_WR(bp, BAR_USTRORM_INTMEM +
5497                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5498                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5499                 REG_WR(bp, BAR_USTRORM_INTMEM +
5500                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5501                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5502
5503                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5504                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5505                          max_agg_size);
5506         }
5507
5508         /* dropless flow control */
5509         if (CHIP_IS_E1H(bp)) {
5510                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5511
5512                 rx_pause.bd_thr_low = 250;
5513                 rx_pause.cqe_thr_low = 250;
5514                 rx_pause.cos = 1;
5515                 rx_pause.sge_thr_low = 0;
5516                 rx_pause.bd_thr_high = 350;
5517                 rx_pause.cqe_thr_high = 350;
5518                 rx_pause.sge_thr_high = 0;
5519
5520                 for_each_queue(bp, i) {
5521                         struct bnx2x_fastpath *fp = &bp->fp[i];
5522
5523                         if (!fp->disable_tpa) {
5524                                 rx_pause.sge_thr_low = 150;
5525                                 rx_pause.sge_thr_high = 250;
5526                         }
5527
5528
5529                         offset = BAR_USTRORM_INTMEM +
5530                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5531                                                                    fp->cl_id);
5532                         for (j = 0;
5533                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5534                              j++)
5535                                 REG_WR(bp, offset + j*4,
5536                                        ((u32 *)&rx_pause)[j]);
5537                 }
5538         }
5539
5540         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5541
5542         /* Init rate shaping and fairness contexts */
5543         if (IS_E1HMF(bp)) {
5544                 int vn;
5545
5546                 /* There is no active link during init;
5547                    until link is up, assume a 10Gbps link rate */
5548                 bp->link_vars.line_speed = SPEED_10000;
5549                 bnx2x_init_port_minmax(bp);
5550
5551                 if (!BP_NOMCP(bp))
5552                         bp->mf_config =
5553                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5554                 bnx2x_calc_vn_weight_sum(bp);
5555
5556                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5557                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5558
5559                 /* Enable rate shaping and fairness */
5560                 bp->cmng.flags.cmng_enables |=
5561                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5562
5563         } else {
5564                 /* rate shaping and fairness are disabled */
5565                 DP(NETIF_MSG_IFUP,
5566                    "single function mode  minmax will be disabled\n");
5567         }
5568
5569
5570         /* Store it to internal memory */
5571         if (bp->port.pmf)
5572                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5573                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5574                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5575                                ((u32 *)(&bp->cmng))[i]);
5576 }
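/* The paired low/high REG_WR calls above all follow the same 64-bit
 * split pattern.  A minimal helper sketch (hypothetical, not part of
 * the original driver interface) makes the idiom explicit:
 */
static inline void bnx2x_storm_wr64(struct bnx2x *bp, u32 addr, u64 val)
{
        /* firmware expects the low 32 bits at addr, high 32 bits at addr + 4 */
        REG_WR(bp, addr, U64_LO(val));
        REG_WR(bp, addr + 4, U64_HI(val));
}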
5577
5578 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5579 {
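        /* MCP load codes are cumulative: COMMON also performs the PORT
         * and FUNCTION stages and PORT also performs FUNCTION, hence
         * the deliberate fall-through below */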
5580         switch (load_code) {
5581         case FW_MSG_CODE_DRV_LOAD_COMMON:
5582                 bnx2x_init_internal_common(bp);
5583                 /* no break */
5584
5585         case FW_MSG_CODE_DRV_LOAD_PORT:
5586                 bnx2x_init_internal_port(bp);
5587                 /* no break */
5588
5589         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5590                 bnx2x_init_internal_func(bp);
5591                 break;
5592
5593         default:
5594                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5595                 break;
5596         }
5597 }
5598
5599 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5600 {
5601         int i;
5602
5603         for_each_queue(bp, i) {
5604                 struct bnx2x_fastpath *fp = &bp->fp[i];
5605
5606                 fp->bp = bp;
5607                 fp->state = BNX2X_FP_STATE_CLOSED;
5608                 fp->index = i;
5609                 fp->cl_id = BP_L_ID(bp) + i;
5610 #ifdef BCM_CNIC
5611                 fp->sb_id = fp->cl_id + 1;
5612 #else
5613                 fp->sb_id = fp->cl_id;
5614 #endif
5615                 DP(NETIF_MSG_IFUP,
5616                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5617                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5618                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5619                               fp->sb_id);
5620                 bnx2x_update_fpsb_idx(fp);
5621         }
5622
5623         /* ensure status block indices were read */
5624         rmb();
5625
5626
5627         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5628                           DEF_SB_ID);
5629         bnx2x_update_dsb_idx(bp);
5630         bnx2x_update_coalesce(bp);
5631         bnx2x_init_rx_rings(bp);
5632         bnx2x_init_tx_ring(bp);
5633         bnx2x_init_sp_ring(bp);
5634         bnx2x_init_context(bp);
5635         bnx2x_init_internal(bp, load_code);
5636         bnx2x_init_ind_table(bp);
5637         bnx2x_stats_init(bp);
5638
5639         /* At this point, we are ready for interrupts */
5640         atomic_set(&bp->intr_sem, 0);
5641
5642         /* flush all before enabling interrupts */
5643         mb();
5644         mmiowb();
5645
5646         bnx2x_int_enable(bp);
5647
5648         /* Check for SPIO5 */
5649         bnx2x_attn_int_deasserted0(bp,
5650                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5651                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5652 }
5653
5654 /* end of nic init */
5655
5656 /*
5657  * gzip service functions
5658  */
5659
5660 static int bnx2x_gunzip_init(struct bnx2x *bp)
5661 {
5662         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5663                                               &bp->gunzip_mapping);
5664         if (bp->gunzip_buf  == NULL)
5665                 goto gunzip_nomem1;
5666
5667         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5668         if (bp->strm  == NULL)
5669                 goto gunzip_nomem2;
5670
5671         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5672                                       GFP_KERNEL);
5673         if (bp->strm->workspace == NULL)
5674                 goto gunzip_nomem3;
5675
5676         return 0;
5677
5678 gunzip_nomem3:
5679         kfree(bp->strm);
5680         bp->strm = NULL;
5681
5682 gunzip_nomem2:
5683         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5684                             bp->gunzip_mapping);
5685         bp->gunzip_buf = NULL;
5686
5687 gunzip_nomem1:
5688         netdev_err(bp->dev, "Cannot allocate firmware decompression buffer\n");
5689         return -ENOMEM;
5690 }
5691
5692 static void bnx2x_gunzip_end(struct bnx2x *bp)
5693 {
5694         kfree(bp->strm->workspace);
5695
5696         kfree(bp->strm);
5697         bp->strm = NULL;
5698
5699         if (bp->gunzip_buf) {
5700                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5701                                     bp->gunzip_mapping);
5702                 bp->gunzip_buf = NULL;
5703         }
5704 }
5705
5706 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5707 {
5708         int n, rc;
5709
5710         /* check gzip header */
5711         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5712                 BNX2X_ERR("Bad gzip header\n");
5713                 return -EINVAL;
5714         }
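        /* Per RFC 1952 the fixed gzip header is 10 bytes: magic (0x1f,
         * 0x8b), compression method, flags, mtime, xfl and os.  If the
         * FNAME flag (bit 3) is set, a NUL-terminated original file
         * name follows before the raw deflate stream. */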
5715
5716         n = 10;
5717
5718 #define FNAME                           0x8
5719
5720         if (zbuf[3] & FNAME)
5721                 while ((zbuf[n++] != 0) && (n < len));
5722
5723         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5724         bp->strm->avail_in = len - n;
5725         bp->strm->next_out = bp->gunzip_buf;
5726         bp->strm->avail_out = FW_BUF_SIZE;
5727
5728         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5729         if (rc != Z_OK)
5730                 return rc;
5731
5732         rc = zlib_inflate(bp->strm, Z_FINISH);
5733         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5734                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5735                            bp->strm->msg);
5736
5737         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5738         if (bp->gunzip_outlen & 0x3)
5739                 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5740                            bp->gunzip_outlen);
5741         bp->gunzip_outlen >>= 2;
5742
5743         zlib_inflateEnd(bp->strm);
5744
5745         if (rc == Z_STREAM_END)
5746                 return 0;
5747
5748         return rc;
5749 }
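/* Usage sketch (illustrative): a caller decompresses a firmware blob
 * into the pre-allocated DMA buffer and then consumes gunzip_outlen
 * 32-bit words from bp->gunzip_buf:
 *
 *      rc = bnx2x_gunzip(bp, zipped_data, zipped_len);
 *      if (rc == 0)
 *              use_words((u32 *)bp->gunzip_buf, bp->gunzip_outlen);
 *
 * where zipped_data, zipped_len and use_words() are hypothetical.
 */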
5750
5751 /* nic load/unload */
5752
5753 /*
5754  * General service functions
5755  */
5756
5757 /* send a NIG loopback debug packet */
5758 static void bnx2x_lb_pckt(struct bnx2x *bp)
5759 {
5760         u32 wb_write[3];
5761
5762         /* Ethernet source and destination addresses */
5763         wb_write[0] = 0x55555555;
5764         wb_write[1] = 0x55555555;
5765         wb_write[2] = 0x20;             /* SOP */
5766         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5767
5768         /* NON-IP protocol */
5769         wb_write[0] = 0x09000000;
5770         wb_write[1] = 0x55555555;
5771         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5772         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5773 }
5774
5775 /* Some of the internal memories are not directly readable
5776  * from the driver; to test them, debug packets are sent
5777  * through the loopback path.
5778  */
5779 static int bnx2x_int_mem_test(struct bnx2x *bp)
5780 {
5781         int factor;
5782         int count, i;
5783         u32 val = 0;
5784
5785         if (CHIP_REV_IS_FPGA(bp))
5786                 factor = 120;
5787         else if (CHIP_REV_IS_EMUL(bp))
5788                 factor = 200;
5789         else
5790                 factor = 1;
5791
5792         DP(NETIF_MSG_HW, "start part1\n");
5793
5794         /* Disable inputs of parser neighbor blocks */
5795         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5796         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5797         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5798         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5799
5800         /*  Write 0 to parser credits for CFC search request */
5801         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5802
5803         /* send Ethernet packet */
5804         bnx2x_lb_pckt(bp);
5805
5806         /* TODO: should the NIG statistics be reset here? */
5807         /* Wait until NIG register shows 1 packet of size 0x10 */
5808         count = 1000 * factor;
5809         while (count) {
5810
5811                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5812                 val = *bnx2x_sp(bp, wb_data[0]);
5813                 if (val == 0x10)
5814                         break;
5815
5816                 msleep(10);
5817                 count--;
5818         }
5819         if (val != 0x10) {
5820                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5821                 return -1;
5822         }
5823
5824         /* Wait until PRS register shows 1 packet */
5825         count = 1000 * factor;
5826         while (count) {
5827                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5828                 if (val == 1)
5829                         break;
5830
5831                 msleep(10);
5832                 count--;
5833         }
5834         if (val != 0x1) {
5835                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5836                 return -2;
5837         }
5838
5839         /* Reset and init BRB, PRS */
5840         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5841         msleep(50);
5842         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5843         msleep(50);
5844         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5845         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5846
5847         DP(NETIF_MSG_HW, "part2\n");
5848
5849         /* Disable inputs of parser neighbor blocks */
5850         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5851         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5852         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5853         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5854
5855         /* Write 0 to parser credits for CFC search request */
5856         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5857
5858         /* send 10 Ethernet packets */
5859         for (i = 0; i < 10; i++)
5860                 bnx2x_lb_pckt(bp);
5861
5862         /* Wait until NIG register shows 10 + 1 packets,
5863            i.e. a total of 11*0x10 = 0xb0 octets */
5864         count = 1000 * factor;
5865         while (count) {
5866
5867                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5868                 val = *bnx2x_sp(bp, wb_data[0]);
5869                 if (val == 0xb0)
5870                         break;
5871
5872                 msleep(10);
5873                 count--;
5874         }
5875         if (val != 0xb0) {
5876                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5877                 return -3;
5878         }
5879
5880         /* Wait until PRS register shows 2 packets */
5881         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5882         if (val != 2)
5883                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5884
5885         /* Write 1 to parser credits for CFC search request */
5886         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5887
5888         /* Wait until PRS register shows 3 packets */
5889         msleep(10 * factor);
5891         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5892         if (val != 3)
5893                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5894
5895         /* clear NIG EOP FIFO */
5896         for (i = 0; i < 11; i++)
5897                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5898         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5899         if (val != 1) {
5900                 BNX2X_ERR("clear of NIG failed\n");
5901                 return -4;
5902         }
5903
5904         /* Reset and init BRB, PRS, NIG */
5905         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5906         msleep(50);
5907         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5908         msleep(50);
5909         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5910         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5911 #ifndef BCM_CNIC
5912         /* set NIC mode */
5913         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5914 #endif
5915
5916         /* Enable inputs of parser neighbor blocks */
5917         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5918         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5919         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5920         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5921
5922         DP(NETIF_MSG_HW, "done\n");
5923
5924         return 0; /* OK */
5925 }
5926
5927 static void enable_blocks_attention(struct bnx2x *bp)
5928 {
5929         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5930         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5931         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5932         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5933         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5934         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5935         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5936         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5937         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5938 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5939 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5940         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5941         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5942         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5943 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5944 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5945         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5946         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5947         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5948         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5949 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5950 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5951         if (CHIP_REV_IS_FPGA(bp))
5952                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5953         else
5954                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5955         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5956         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5957         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5958 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5959 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5960         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5961         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5962 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5963         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5964 }
5965
5966
5967 static void bnx2x_reset_common(struct bnx2x *bp)
5968 {
5969         /* reset_common */
5970         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5971                0xd3ffff7f);
5972         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5973 }
5974
5975 static void bnx2x_init_pxp(struct bnx2x *bp)
5976 {
5977         u16 devctl;
5978         int r_order, w_order;
5979
5980         pci_read_config_word(bp->pdev,
5981                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5982         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5983         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5984         if (bp->mrrs == -1)
5985                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5986         else {
5987                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5988                 r_order = bp->mrrs;
5989         }
5990
5991         bnx2x_init_pxp_arb(bp, r_order, w_order);
5992 }
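/* Worked example (illustrative): devctl == 0x2030 decodes as payload
 * field (bits 7:5) = 1 -> 256-byte max payload, w_order 1, and
 * read-request field (bits 14:12) = 2 -> 512-byte max read request,
 * r_order 2 (unless overridden by the mrrs module parameter).
 */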
5993
5994 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5995 {
5996         u32 val;
5997         u8 port;
5998         u8 is_required = 0;
5999
6000         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6001               SHARED_HW_CFG_FAN_FAILURE_MASK;
6002
6003         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6004                 is_required = 1;
6005
6006         /*
6007          * The fan failure mechanism is usually related to the PHY type since
6008          * the power consumption of the board is affected by the PHY. Currently,
6009          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6010          */
6011         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6012                 for (port = PORT_0; port < PORT_MAX; port++) {
6013                         u32 phy_type =
6014                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6015                                          external_phy_config) &
6016                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6017                         is_required |=
6018                                 ((phy_type ==
6019                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6020                                  (phy_type ==
6021                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6022                                  (phy_type ==
6023                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6024                 }
6025
6026         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6027
6028         if (is_required == 0)
6029                 return;
6030
6031         /* Fan failure is indicated by SPIO 5 */
6032         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6033                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6034
6035         /* set to active low mode */
6036         val = REG_RD(bp, MISC_REG_SPIO_INT);
6037         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6038                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6039         REG_WR(bp, MISC_REG_SPIO_INT, val);
6040
6041         /* enable interrupt to signal the IGU */
6042         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6043         val |= (1 << MISC_REGISTERS_SPIO_5);
6044         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6045 }
6046
6047 static int bnx2x_init_common(struct bnx2x *bp)
6048 {
6049         u32 val, i;
6050 #ifdef BCM_CNIC
6051         u32 wb_write[2];
6052 #endif
6053
6054         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6055
6056         bnx2x_reset_common(bp);
6057         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6058         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6059
6060         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6061         if (CHIP_IS_E1H(bp))
6062                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6063
6064         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6065         msleep(30);
6066         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6067
6068         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6069         if (CHIP_IS_E1(bp)) {
6070                 /* enable HW interrupt from PXP on USDM overflow
6071                    bit 16 on INT_MASK_0 */
6072                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6073         }
6074
6075         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6076         bnx2x_init_pxp(bp);
6077
6078 #ifdef __BIG_ENDIAN
6079         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6080         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6081         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6082         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6083         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6084         /* make sure this value is 0 */
6085         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6086
6087 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6088         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6089         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6090         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6091         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6092 #endif
6093
6094         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6095 #ifdef BCM_CNIC
6096         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6097         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6098         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6099 #endif
6100
6101         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6102                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6103
6104         /* let the HW do its magic ... */
6105         msleep(100);
6106         /* finish PXP init */
6107         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6108         if (val != 1) {
6109                 BNX2X_ERR("PXP2 CFG failed\n");
6110                 return -EBUSY;
6111         }
6112         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6113         if (val != 1) {
6114                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6115                 return -EBUSY;
6116         }
6117
6118         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6119         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6120
6121         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6122
6123         /* clean the DMAE memory */
6124         bp->dmae_ready = 1;
6125         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6126
6127         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6128         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6129         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6130         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6131
6132         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6133         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6134         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6135         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6136
6137         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6138
6139 #ifdef BCM_CNIC
6140         wb_write[0] = 0;
6141         wb_write[1] = 0;
6142         for (i = 0; i < 64; i++) {
6143                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6144                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6145
6146                 if (CHIP_IS_E1H(bp)) {
6147                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6148                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6149                                           wb_write, 2);
6150                 }
6151         }
6152 #endif
6153         /* soft reset pulse */
6154         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6155         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6156
6157 #ifdef BCM_CNIC
6158         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6159 #endif
6160
6161         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6162         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6163         if (!CHIP_REV_IS_SLOW(bp)) {
6164                 /* enable hw interrupt from doorbell Q */
6165                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6166         }
6167
6168         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6169         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6170         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6171 #ifndef BCM_CNIC
6172         /* set NIC mode */
6173         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6174 #endif
6175         if (CHIP_IS_E1H(bp))
6176                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6177
6178         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6179         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6180         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6181         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6182
6183         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6184         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6185         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6186         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6187
6188         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6189         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6190         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6191         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6192
6193         /* sync semi rtc */
6194         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6195                0x80000000);
6196         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6197                0x80000000);
6198
6199         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6200         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6201         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6202
6203         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6204         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6205                 REG_WR(bp, i, 0xc0cac01a);
6206                 /* TODO: replace with something meaningful */
6207         }
6208         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6209 #ifdef BCM_CNIC
6210         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6211         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6212         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6213         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6214         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6215         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6216         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6217         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6218         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6219         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6220 #endif
6221         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6222
6223         if (sizeof(union cdu_context) != 1024)
6224                 /* we currently assume that a context is 1024 bytes */
6225                 pr_alert("please adjust the size of cdu_context(%ld)\n",
6226                          (long)sizeof(union cdu_context));
6227
6228         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6229         val = (4 << 24) + (0 << 12) + 1024;
6230         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6231
6232         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6233         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6234         /* enable context validation interrupt from CFC */
6235         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6236
6237         /* set the thresholds to prevent CFC/CDU race */
6238         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6239
6240         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6241         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6242
6243         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6244         /* Reset PCIE errors for debug */
6245         REG_WR(bp, 0x2814, 0xffffffff);
6246         REG_WR(bp, 0x3820, 0xffffffff);
6247
6248         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6249         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6250         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6251         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6252
6253         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6254         if (CHIP_IS_E1H(bp)) {
6255                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6256                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6257         }
6258
6259         if (CHIP_REV_IS_SLOW(bp))
6260                 msleep(200);
6261
6262         /* finish CFC init */
6263         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6264         if (val != 1) {
6265                 BNX2X_ERR("CFC LL_INIT failed\n");
6266                 return -EBUSY;
6267         }
6268         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6269         if (val != 1) {
6270                 BNX2X_ERR("CFC AC_INIT failed\n");
6271                 return -EBUSY;
6272         }
6273         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6274         if (val != 1) {
6275                 BNX2X_ERR("CFC CAM_INIT failed\n");
6276                 return -EBUSY;
6277         }
6278         REG_WR(bp, CFC_REG_DEBUG0, 0);
6279
6280         /* read NIG statistic
6281            to see if this is the first load since power-up */
6282         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6283         val = *bnx2x_sp(bp, wb_data[0]);
6284
6285         /* do internal memory self test */
6286         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6287                 BNX2X_ERR("internal mem self test failed\n");
6288                 return -EBUSY;
6289         }
6290
6291         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6292         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6293         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6294         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6295         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6296                 bp->port.need_hw_lock = 1;
6297                 break;
6298
6299         default:
6300                 break;
6301         }
6302
6303         bnx2x_setup_fan_failure_detection(bp);
6304
6305         /* clear PXP2 attentions */
6306         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6307
6308         enable_blocks_attention(bp);
6309
6310         if (!BP_NOMCP(bp)) {
6311                 bnx2x_acquire_phy_lock(bp);
6312                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6313                 bnx2x_release_phy_lock(bp);
6314         } else
6315                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6316
6317         return 0;
6318 }
6319
6320 static int bnx2x_init_port(struct bnx2x *bp)
6321 {
6322         int port = BP_PORT(bp);
6323         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6324         u32 low, high;
6325         u32 val;
6326
6327         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6328
6329         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6330
6331         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6332         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6333
6334         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6335         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6336         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6337         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6338
6339 #ifdef BCM_CNIC
6340         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6341
6342         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6343         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6344         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6345 #endif
6346         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6347
6348         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6349         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6350                 /* no pause for emulation and FPGA */
6351                 low = 0;
6352                 high = 513;
6353         } else {
6354                 if (IS_E1HMF(bp))
6355                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6356                 else if (bp->dev->mtu > 4096) {
6357                         if (bp->flags & ONE_PORT_FLAG)
6358                                 low = 160;
6359                         else {
6360                                 val = bp->dev->mtu;
6361                                 /* (24*1024 + val*4)/256 */
6362                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6363                         }
6364                 } else
6365                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6366                 high = low + 56;        /* 14*1024/256 */
6367         }
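        /* e.g. (illustrative): a single-function two-port board at mtu
         * 9000 takes the jumbo branch: low = 96 + 9000/64 rounded up
         * = 237, high = 237 + 56 = 293, in units of 256-byte BRB
         * blocks */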
6368         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6369         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6370
6371
6372         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6373
6374         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6375         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6376         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6377         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6378
6379         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6380         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6381         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6382         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6383
6384         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6385         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6386
6387         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6388
6389         /* configure PBF to work without PAUSE mtu 9000 */
6390         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6391
6392         /* update threshold */
6393         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6394         /* update init credit */
6395         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6396
6397         /* probe changes */
6398         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6399         msleep(5);
6400         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6401
6402 #ifdef BCM_CNIC
6403         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6404 #endif
6405         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6406         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6407
6408         if (CHIP_IS_E1(bp)) {
6409                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6410                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6411         }
6412         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6413
6414         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6415         /* init aeu_mask_attn_func_0/1:
6416          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6417          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6418          *             bits 4-7 are used for "per vn group attention" */
6419         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6420                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6421
6422         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6423         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6424         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6425         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6426         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6427
6428         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6429
6430         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6431
6432         if (CHIP_IS_E1H(bp)) {
6433                 /* 0x2 disable e1hov, 0x1 enable */
6434                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6435                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6436
6437                 {
6438                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6439                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6440                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6441                 }
6442         }
6443
6444         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6445         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6446
6447         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6448         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6449                 {
6450                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6451
6452                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6453                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6454
6455                 /* The GPIO should be swapped if the swap register is
6456                    set and active */
6457                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6458                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6459
6460                 /* Select function upon port-swap configuration */
6461                 if (port == 0) {
6462                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6463                         aeu_gpio_mask = (swap_val && swap_override) ?
6464                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6465                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6466                 } else {
6467                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6468                         aeu_gpio_mask = (swap_val && swap_override) ?
6469                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6470                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6471                 }
6472                 val = REG_RD(bp, offset);
6473                 /* add GPIO3 to group */
6474                 val |= aeu_gpio_mask;
6475                 REG_WR(bp, offset, val);
6476                 }
6477                 break;
6478
6479         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6480         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6481                 /* add SPIO 5 to group 0 */
6482                 {
6483                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6484                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6485                 val = REG_RD(bp, reg_addr);
6486                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6487                 REG_WR(bp, reg_addr, val);
6488                 }
6489                 break;
6490
6491         default:
6492                 break;
6493         }
6494
6495         bnx2x__link_reset(bp);
6496
6497         return 0;
6498 }
6499
6500 #define ILT_PER_FUNC            (768/2)
6501 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6502 /* The physical address is shifted right 12 bits and a
6503    1=valid bit is set at bit 53;
6504    then, since this is a wide register(TM),
6505    we split it into two 32-bit writes
6506  */
6507 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6508 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6509 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6510 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
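/* Worked example (illustrative): for a DMA address of
 * 0x0000012345678000,
 *      ONCHIP_ADDR1 = 0x12345678  (addr >> 12, low 32 bits)
 *      ONCHIP_ADDR2 = 0x00100000  (valid bit 20 of the high word set;
 *                                  addr >> 44 == 0 here)
 */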
6511
6512 #ifdef BCM_CNIC
6513 #define CNIC_ILT_LINES          127
6514 #define CNIC_CTX_PER_ILT        16
6515 #else
6516 #define CNIC_ILT_LINES          0
6517 #endif
6518
6519 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6520 {
6521         int reg;
6522
6523         if (CHIP_IS_E1H(bp))
6524                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6525         else /* E1 */
6526                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6527
6528         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6529 }
6530
6531 static int bnx2x_init_func(struct bnx2x *bp)
6532 {
6533         int port = BP_PORT(bp);
6534         int func = BP_FUNC(bp);
6535         u32 addr, val;
6536         int i;
6537
6538         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6539
6540         /* set MSI reconfigure capability */
6541         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6542         val = REG_RD(bp, addr);
6543         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6544         REG_WR(bp, addr, val);
6545
6546         i = FUNC_ILT_BASE(func);
6547
6548         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6549         if (CHIP_IS_E1H(bp)) {
6550                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6551                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6552         } else /* E1 */
6553                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6554                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6555
6556 #ifdef BCM_CNIC
6557         i += 1 + CNIC_ILT_LINES;
6558         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6559         if (CHIP_IS_E1(bp))
6560                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6561         else {
6562                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6563                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6564         }
6565
6566         i++;
6567         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6568         if (CHIP_IS_E1(bp))
6569                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6570         else {
6571                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6572                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6573         }
6574
6575         i++;
6576         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6577         if (CHIP_IS_E1(bp))
6578                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6579         else {
6580                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6581                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6582         }
6583
6584         /* tell the searcher where the T2 table is */
6585         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6586
6587         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6588                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6589
6590         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6591                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6592                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6593
6594         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6595 #endif
6596
6597         if (CHIP_IS_E1H(bp)) {
6598                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6599                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6600                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6601                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6602                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6603                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6604                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6605                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6606                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6607
6608                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6609                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6610         }
6611
6612         /* HC init per function */
6613         if (CHIP_IS_E1H(bp)) {
6614                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6615
6616                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6617                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6618         }
6619         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6620
6621         /* Reset PCIE errors for debug */
6622         REG_WR(bp, 0x2114, 0xffffffff);
6623         REG_WR(bp, 0x2120, 0xffffffff);
6624
6625         return 0;
6626 }
6627
6628 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6629 {
6630         int i, rc = 0;
6631
6632         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6633            BP_FUNC(bp), load_code);
6634
6635         bp->dmae_ready = 0;
6636         mutex_init(&bp->dmae_mutex);
6637         rc = bnx2x_gunzip_init(bp);
6638         if (rc)
6639                 return rc;
6640
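        /* as in bnx2x_init_internal(): load codes are cumulative, so
         * the cases below deliberately fall through */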
6641         switch (load_code) {
6642         case FW_MSG_CODE_DRV_LOAD_COMMON:
6643                 rc = bnx2x_init_common(bp);
6644                 if (rc)
6645                         goto init_hw_err;
6646                 /* no break */
6647
6648         case FW_MSG_CODE_DRV_LOAD_PORT:
6649                 bp->dmae_ready = 1;
6650                 rc = bnx2x_init_port(bp);
6651                 if (rc)
6652                         goto init_hw_err;
6653                 /* no break */
6654
6655         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6656                 bp->dmae_ready = 1;
6657                 rc = bnx2x_init_func(bp);
6658                 if (rc)
6659                         goto init_hw_err;
6660                 break;
6661
6662         default:
6663                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6664                 break;
6665         }
6666
6667         if (!BP_NOMCP(bp)) {
6668                 int func = BP_FUNC(bp);
6669
6670                 bp->fw_drv_pulse_wr_seq =
6671                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6672                                  DRV_PULSE_SEQ_MASK);
6673                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6674         }
6675
6676         /* this needs to be done before gunzip end */
6677         bnx2x_zero_def_sb(bp);
6678         for_each_queue(bp, i)
6679                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6680 #ifdef BCM_CNIC
6681         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6682 #endif
6683
6684 init_hw_err:
6685         bnx2x_gunzip_end(bp);
6686
6687         return rc;
6688 }
6689
6690 static void bnx2x_free_mem(struct bnx2x *bp)
6691 {
6692
6693 #define BNX2X_PCI_FREE(x, y, size) \
6694         do { \
6695                 if (x) { \
6696                         pci_free_consistent(bp->pdev, size, x, y); \
6697                         x = NULL; \
6698                         y = 0; \
6699                 } \
6700         } while (0)
6701
6702 #define BNX2X_FREE(x) \
6703         do { \
6704                 if (x) { \
6705                         vfree(x); \
6706                         x = NULL; \
6707                 } \
6708         } while (0)
6709
6710         int i;
6711
6712         /* fastpath */
6713         /* Common */
6714         for_each_queue(bp, i) {
6715
6716                 /* status blocks */
6717                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6718                                bnx2x_fp(bp, i, status_blk_mapping),
6719                                sizeof(struct host_status_block));
6720         }
6721         /* Rx */
6722         for_each_queue(bp, i) {
6723
6724                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6725                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6726                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6727                                bnx2x_fp(bp, i, rx_desc_mapping),
6728                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6729
6730                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6731                                bnx2x_fp(bp, i, rx_comp_mapping),
6732                                sizeof(struct eth_fast_path_rx_cqe) *
6733                                NUM_RCQ_BD);
6734
6735                 /* SGE ring */
6736                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6737                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6738                                bnx2x_fp(bp, i, rx_sge_mapping),
6739                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6740         }
6741         /* Tx */
6742         for_each_queue(bp, i) {
6743
6744                 /* fastpath tx rings: tx_buf tx_desc */
6745                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6746                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6747                                bnx2x_fp(bp, i, tx_desc_mapping),
6748                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6749         }
6750         /* end of fastpath */
6751
6752         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6753                        sizeof(struct host_def_status_block));
6754
6755         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6756                        sizeof(struct bnx2x_slowpath));
6757
6758 #ifdef BCM_CNIC
6759         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6760         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6761         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6762         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6763         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6764                        sizeof(struct host_status_block));
6765 #endif
6766         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6767
6768 #undef BNX2X_PCI_FREE
6769 #undef BNX2X_FREE
6770 }
6771
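/*
 * bnx2x_alloc_mem - allocate all host memory the driver and chip need.
 *
 * DMA-visible blocks (status blocks, descriptor rings, slowpath buffer,
 * CNIC tables, SPQ) come from pci_alloc_consistent() via BNX2X_PCI_ALLOC;
 * driver-only shadow arrays come from vmalloc() via BNX2X_ALLOC.  On any
 * failure all partial allocations are undone and -ENOMEM is returned.
 */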
6772 static int bnx2x_alloc_mem(struct bnx2x *bp)
6773 {
6774
6775 #define BNX2X_PCI_ALLOC(x, y, size) \
6776         do { \
6777                 x = pci_alloc_consistent(bp->pdev, size, y); \
6778                 if (x == NULL) \
6779                         goto alloc_mem_err; \
6780                 memset(x, 0, size); \
6781         } while (0)
6782
6783 #define BNX2X_ALLOC(x, size) \
6784         do { \
6785                 x = vmalloc(size); \
6786                 if (x == NULL) \
6787                         goto alloc_mem_err; \
6788                 memset(x, 0, size); \
6789         } while (0)
6790
6791         int i;
6792
6793         /* fastpath */
6794         /* Common */
6795         for_each_queue(bp, i) {
6796                 bnx2x_fp(bp, i, bp) = bp;
6797
6798                 /* status blocks */
6799                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6800                                 &bnx2x_fp(bp, i, status_blk_mapping),
6801                                 sizeof(struct host_status_block));
6802         }
6803         /* Rx */
6804         for_each_queue(bp, i) {
6805
6806                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6807                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6808                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6809                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6810                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6811                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6812
6813                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6814                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6815                                 sizeof(struct eth_fast_path_rx_cqe) *
6816                                 NUM_RCQ_BD);
6817
6818                 /* SGE ring */
6819                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6820                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6821                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6822                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6823                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6824         }
6825         /* Tx */
6826         for_each_queue(bp, i) {
6827
6828                 /* fastpath tx rings: tx_buf tx_desc */
6829                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6830                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6831                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6832                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6833                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6834         }
6835         /* end of fastpath */
6836
6837         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6838                         sizeof(struct host_def_status_block));
6839
6840         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6841                         sizeof(struct bnx2x_slowpath));
6842
6843 #ifdef BCM_CNIC
6844         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6845
6846         /* allocate searcher T2 table;
6847            we allocate 1/4 of alloc num for T2
6848            (which is not entered into the ILT) */
6849         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6850
6851         /* Initialize T2 (for 1024 connections) */
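        /* the last 8 bytes of every 64-byte entry hold the physical
         * address of the next entry, chaining the table for the searcher */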
6852         for (i = 0; i < 16*1024; i += 64)
6853                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6854
6855         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6856         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6857
6858         /* QM queues (128*MAX_CONN) */
6859         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6860
6861         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6862                         sizeof(struct host_status_block));
6863 #endif
6864
6865         /* Slow path ring */
6866         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6867
6868         return 0;
6869
6870 alloc_mem_err:
6871         bnx2x_free_mem(bp);
6872         return -ENOMEM;
6873
6874 #undef BNX2X_PCI_ALLOC
6875 #undef BNX2X_ALLOC
6876 }
6877
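/* Release any skbs still outstanding on the Tx rings, walking each queue
 * from its packet consumer up to its packet producer */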
6878 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6879 {
6880         int i;
6881
6882         for_each_queue(bp, i) {
6883                 struct bnx2x_fastpath *fp = &bp->fp[i];
6884
6885                 u16 bd_cons = fp->tx_bd_cons;
6886                 u16 sw_prod = fp->tx_pkt_prod;
6887                 u16 sw_cons = fp->tx_pkt_cons;
6888
6889                 while (sw_cons != sw_prod) {
6890                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6891                         sw_cons++;
6892                 }
6893         }
6894 }
6895
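/* Unmap and free every skb posted to the Rx rings, then drain the TPA
 * aggregation pool unless TPA is disabled for the queue */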
6896 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6897 {
6898         int i, j;
6899
6900         for_each_queue(bp, j) {
6901                 struct bnx2x_fastpath *fp = &bp->fp[j];
6902
6903                 for (i = 0; i < NUM_RX_BD; i++) {
6904                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6905                         struct sk_buff *skb = rx_buf->skb;
6906
6907                         if (skb == NULL)
6908                                 continue;
6909
6910                         pci_unmap_single(bp->pdev,
6911                                          pci_unmap_addr(rx_buf, mapping),
6912                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6913
6914                         rx_buf->skb = NULL;
6915                         dev_kfree_skb(skb);
6916                 }
6917                 if (!fp->disable_tpa)
6918                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6919                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6920                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6921         }
6922 }
6923
6924 static void bnx2x_free_skbs(struct bnx2x *bp)
6925 {
6926         bnx2x_free_tx_skbs(bp);
6927         bnx2x_free_rx_skbs(bp);
6928 }
6929
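/*
 * Free the MSI-X vectors in the same layout they were requested: the
 * slowpath vector first, then (skipping the CNIC slot if present) one
 * vector per fastpath queue.
 */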
6930 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6931 {
6932         int i, offset = 1;
6933
6934         free_irq(bp->msix_table[0].vector, bp->dev);
6935         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6936            bp->msix_table[0].vector);
6937
6938 #ifdef BCM_CNIC
6939         offset++;
6940 #endif
6941         for_each_queue(bp, i) {
6942                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6943                    "state %x\n", i, bp->msix_table[i + offset].vector,
6944                    bnx2x_fp(bp, i, state));
6945
6946                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6947         }
6948 }
6949
6950 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
6951 {
6952         if (bp->flags & USING_MSIX_FLAG) {
6953                 if (!disable_only)
6954                         bnx2x_free_msix_irqs(bp);
6955                 pci_disable_msix(bp->pdev);
6956                 bp->flags &= ~USING_MSIX_FLAG;
6957
6958         } else if (bp->flags & USING_MSI_FLAG) {
6959                 if (!disable_only)
6960                         free_irq(bp->pdev->irq, bp->dev);
6961                 pci_disable_msi(bp->pdev);
6962                 bp->flags &= ~USING_MSI_FLAG;
6963
6964         } else if (!disable_only)
6965                 free_irq(bp->pdev->irq, bp->dev);
6966 }
6967
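/*
 * Fill the MSI-X table - entry 0 for the slowpath, an optional CNIC entry,
 * then one entry per fastpath queue - and ask the PCI layer to enable
 * all of them at once.
 */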
6968 static int bnx2x_enable_msix(struct bnx2x *bp)
6969 {
6970         int i, rc, offset = 1;
6971         int igu_vec = 0;
6972
6973         bp->msix_table[0].entry = igu_vec;
6974         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6975
6976 #ifdef BCM_CNIC
6977         igu_vec = BP_L_ID(bp) + offset;
6978         bp->msix_table[1].entry = igu_vec;
6979         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6980         offset++;
6981 #endif
6982         for_each_queue(bp, i) {
6983                 igu_vec = BP_L_ID(bp) + offset + i;
6984                 bp->msix_table[i + offset].entry = igu_vec;
6985                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6986                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6987         }
6988
6989         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6990                              BNX2X_NUM_QUEUES(bp) + offset);
6991         if (rc) {
6992                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6993                 return rc;
6994         }
6995
6996         bp->flags |= USING_MSIX_FLAG;
6997
6998         return 0;
6999 }
7000
7001 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7002 {
7003         int i, rc, offset = 1;
7004
7005         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7006                          bp->dev->name, bp->dev);
7007         if (rc) {
7008                 BNX2X_ERR("request sp irq failed\n");
7009                 return -EBUSY;
7010         }
7011
7012 #ifdef BCM_CNIC
7013         offset++;
7014 #endif
7015         for_each_queue(bp, i) {
7016                 struct bnx2x_fastpath *fp = &bp->fp[i];
7017                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7018                          bp->dev->name, i);
7019
7020                 rc = request_irq(bp->msix_table[i + offset].vector,
7021                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7022                 if (rc) {
7023                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7024                         bnx2x_free_msix_irqs(bp);
7025                         return -EBUSY;
7026                 }
7027
7028                 fp->state = BNX2X_FP_STATE_IRQ;
7029         }
7030
7031         i = BNX2X_NUM_QUEUES(bp);
7032         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
7033                     bp->msix_table[0].vector,
7034                     0, bp->msix_table[offset].vector,
7035                     i - 1, bp->msix_table[offset + i - 1].vector);
7036
7037         return 0;
7038 }
7039
7040 static int bnx2x_enable_msi(struct bnx2x *bp)
7041 {
7042         int rc;
7043
7044         rc = pci_enable_msi(bp->pdev);
7045         if (rc) {
7046                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7047                 return -1;
7048         }
7049         bp->flags |= USING_MSI_FLAG;
7050
7051         return 0;
7052 }
7053
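/* Request the single INTx or MSI interrupt; only INTx may be shared */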
7054 static int bnx2x_req_irq(struct bnx2x *bp)
7055 {
7056         unsigned long flags;
7057         int rc;
7058
7059         if (bp->flags & USING_MSI_FLAG)
7060                 flags = 0;
7061         else
7062                 flags = IRQF_SHARED;
7063
7064         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7065                          bp->dev->name, bp->dev);
7066         if (!rc)
7067                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7068
7069         return rc;
7070 }
7071
7072 static void bnx2x_napi_enable(struct bnx2x *bp)
7073 {
7074         int i;
7075
7076         for_each_queue(bp, i)
7077                 napi_enable(&bnx2x_fp(bp, i, napi));
7078 }
7079
7080 static void bnx2x_napi_disable(struct bnx2x *bp)
7081 {
7082         int i;
7083
7084         for_each_queue(bp, i)
7085                 napi_disable(&bnx2x_fp(bp, i, napi));
7086 }
7087
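/*
 * Restart the datapath once the interrupt semaphore drops back to zero:
 * re-enable NAPI and HW interrupts, and wake the Tx queues if the device
 * is fully open.
 */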
7088 static void bnx2x_netif_start(struct bnx2x *bp)
7089 {
7090         int intr_sem;
7091
7092         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7093         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7094
7095         if (intr_sem) {
7096                 if (netif_running(bp->dev)) {
7097                         bnx2x_napi_enable(bp);
7098                         bnx2x_int_enable(bp);
7099                         if (bp->state == BNX2X_STATE_OPEN)
7100                                 netif_tx_wake_all_queues(bp->dev);
7101                 }
7102         }
7103 }
7104
7105 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7106 {
7107         bnx2x_int_disable_sync(bp, disable_hw);
7108         bnx2x_napi_disable(bp);
7109         netif_tx_disable(bp->dev);
7110 }
7111
7112 /*
7113  * Init service functions
7114  */
7115
7116 /**
7117  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7118  *
7119  * @param bp driver descriptor
7120  * @param set set or clear an entry (1 or 0)
7121  * @param mac pointer to a buffer containing a MAC
7122  * @param cl_bit_vec bit vector of clients to register a MAC for
7123  * @param cam_offset offset in a CAM to use
7124  * @param with_bcast set broadcast MAC as well
7125  */
7126 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7127                                       u32 cl_bit_vec, u8 cam_offset,
7128                                       u8 with_bcast)
7129 {
7130         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7131         int port = BP_PORT(bp);
7132
7133         /* CAM allocation
7134          * unicasts 0-31:port0 32-63:port1
7135          * multicast 64-127:port0 128-191:port1
7136          */
7137         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7138         config->hdr.offset = cam_offset;
7139         config->hdr.client_id = 0xff;
7140         config->hdr.reserved1 = 0;
7141
7142         /* primary MAC */
7143         config->config_table[0].cam_entry.msb_mac_addr =
7144                                         swab16(*(u16 *)&mac[0]);
7145         config->config_table[0].cam_entry.middle_mac_addr =
7146                                         swab16(*(u16 *)&mac[2]);
7147         config->config_table[0].cam_entry.lsb_mac_addr =
7148                                         swab16(*(u16 *)&mac[4]);
7149         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7150         if (set)
7151                 config->config_table[0].target_table_entry.flags = 0;
7152         else
7153                 CAM_INVALIDATE(config->config_table[0]);
7154         config->config_table[0].target_table_entry.clients_bit_vector =
7155                                                 cpu_to_le32(cl_bit_vec);
7156         config->config_table[0].target_table_entry.vlan_id = 0;
7157
7158         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7159            (set ? "setting" : "clearing"),
7160            config->config_table[0].cam_entry.msb_mac_addr,
7161            config->config_table[0].cam_entry.middle_mac_addr,
7162            config->config_table[0].cam_entry.lsb_mac_addr);
7163
7164         /* broadcast */
7165         if (with_bcast) {
7166                 config->config_table[1].cam_entry.msb_mac_addr =
7167                         cpu_to_le16(0xffff);
7168                 config->config_table[1].cam_entry.middle_mac_addr =
7169                         cpu_to_le16(0xffff);
7170                 config->config_table[1].cam_entry.lsb_mac_addr =
7171                         cpu_to_le16(0xffff);
7172                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7173                 if (set)
7174                         config->config_table[1].target_table_entry.flags =
7175                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7176                 else
7177                         CAM_INVALIDATE(config->config_table[1]);
7178                 config->config_table[1].target_table_entry.clients_bit_vector =
7179                                                         cpu_to_le32(cl_bit_vec);
7180                 config->config_table[1].target_table_entry.vlan_id = 0;
7181         }
7182
7183         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7184                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7185                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7186 }
7187
7188 /**
7189  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7190  *
7191  * @param bp driver descriptor
7192  * @param set set or clear an entry (1 or 0)
7193  * @param mac pointer to a buffer containing a MAC
7194  * @param cl_bit_vec bit vector of clients to register a MAC for
7195  * @param cam_offset offset in a CAM to use
7196  */
7197 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7198                                        u32 cl_bit_vec, u8 cam_offset)
7199 {
7200         struct mac_configuration_cmd_e1h *config =
7201                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7202
7203         config->hdr.length = 1;
7204         config->hdr.offset = cam_offset;
7205         config->hdr.client_id = 0xff;
7206         config->hdr.reserved1 = 0;
7207
7208         /* primary MAC */
7209         config->config_table[0].msb_mac_addr =
7210                                         swab16(*(u16 *)&mac[0]);
7211         config->config_table[0].middle_mac_addr =
7212                                         swab16(*(u16 *)&mac[2]);
7213         config->config_table[0].lsb_mac_addr =
7214                                         swab16(*(u16 *)&mac[4]);
7215         config->config_table[0].clients_bit_vector =
7216                                         cpu_to_le32(cl_bit_vec);
7217         config->config_table[0].vlan_id = 0;
7218         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7219         if (set)
7220                 config->config_table[0].flags = BP_PORT(bp);
7221         else
7222                 config->config_table[0].flags =
7223                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7224
7225         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7226            (set ? "setting" : "clearing"),
7227            config->config_table[0].msb_mac_addr,
7228            config->config_table[0].middle_mac_addr,
7229            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7230
7231         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7232                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7233                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7234 }
7235
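/*
 * Wait for *state_p (updated from the ramrod completion by
 * bnx2x_sp_event()) to reach @state.  With @poll set the Rx rings are
 * serviced directly instead of relying on interrupts.  Gives up after
 * roughly 5 seconds and returns -EBUSY on timeout.
 */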
7236 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7237                              int *state_p, int poll)
7238 {
7239         /* can take a while if any port is running */
7240         int cnt = 5000;
7241
7242         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7243            poll ? "polling" : "waiting", state, idx);
7244
7245         might_sleep();
7246         while (cnt--) {
7247                 if (poll) {
7248                         bnx2x_rx_int(bp->fp, 10);
7249                         /* if index is different from 0
7250                          * the reply for some commands will
7251                          * be on the non default queue
7252                          */
7253                         if (idx)
7254                                 bnx2x_rx_int(&bp->fp[idx], 10);
7255                 }
7256
7257                 mb(); /* state is changed by bnx2x_sp_event() */
7258                 if (*state_p == state) {
7259 #ifdef BNX2X_STOP_ON_ERROR
7260                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7261 #endif
7262                         return 0;
7263                 }
7264
7265                 msleep(1);
7266
7267                 if (bp->panic)
7268                         return -EIO;
7269         }
7270
7271         /* timeout! */
7272         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7273                   poll ? "polling" : "waiting", state, idx);
7274 #ifdef BNX2X_STOP_ON_ERROR
7275         bnx2x_panic();
7276 #endif
7277
7278         return -EBUSY;
7279 }
7280
7281 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7282 {
7283         bp->set_mac_pending++;
7284         smp_wmb();
7285
7286         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7287                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7288
7289         /* Wait for a completion */
7290         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7291 }
7292
7293 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7294 {
7295         bp->set_mac_pending++;
7296         smp_wmb();
7297
7298         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7299                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7300                                   1);
7301
7302         /* Wait for a completion */
7303         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7304 }
7305
7306 #ifdef BCM_CNIC
7307 /**
7308  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7309  * MAC(s). This function will wait until the ramrod completion
7310  * returns.
7311  *
7312  * @param bp driver handle
7313  * @param set set or clear the CAM entry
7314  *
7315  * @return 0 if success, -ENODEV if ramrod doesn't return.
7316  */
7317 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7318 {
7319         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7320
7321         bp->set_mac_pending++;
7322         smp_wmb();
7323
7324         /* Send a SET_MAC ramrod */
7325         if (CHIP_IS_E1(bp))
7326                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7327                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7328                                   1);
7329         else
7330                 /* CAM allocation for E1H
7331                  * unicasts: by func number
7332                  * multicast: 20+FUNC*20, 20 each
7333                  */
7334                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7335                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7336
7337         /* Wait for a completion when setting */
7338         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7339
7340         return 0;
7341 }
7342 #endif
7343
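/*
 * Bring up the leading (default) connection: reset the IGU state, post a
 * PORT_SETUP ramrod and wait for bp->state to become BNX2X_STATE_OPEN.
 */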
7344 static int bnx2x_setup_leading(struct bnx2x *bp)
7345 {
7346         int rc;
7347
7348         /* reset IGU state */
7349         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7350
7351         /* SETUP ramrod */
7352         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7353
7354         /* Wait for completion */
7355         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7356
7357         return rc;
7358 }
7359
7360 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7361 {
7362         struct bnx2x_fastpath *fp = &bp->fp[index];
7363
7364         /* reset IGU state */
7365         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7366
7367         /* SETUP ramrod */
7368         fp->state = BNX2X_FP_STATE_OPENING;
7369         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7370                       fp->cl_id, 0);
7371
7372         /* Wait for completion */
7373         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7374                                  &(fp->state), 0);
7375 }
7376
7377 static int bnx2x_poll(struct napi_struct *napi, int budget);
7378
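/* Derive the queue count used in MSI-X mode from bp->multi_mode: one
 * queue when RSS is disabled, otherwise up to num_queues/online CPUs */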
7379 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7380 {
7381
7382         switch (bp->multi_mode) {
7383         case ETH_RSS_MODE_DISABLED:
7384                 bp->num_queues = 1;
7385                 break;
7386
7387         case ETH_RSS_MODE_REGULAR:
7388                 if (num_queues)
7389                         bp->num_queues = min_t(u32, num_queues,
7390                                                   BNX2X_MAX_QUEUES(bp));
7391                 else
7392                         bp->num_queues = min_t(u32, num_online_cpus(),
7393                                                   BNX2X_MAX_QUEUES(bp));
7394                 break;
7395
7396
7397         default:
7398                 bp->num_queues = 1;
7399                 break;
7400         }
7401 }
7402
7403 static int bnx2x_set_num_queues(struct bnx2x *bp)
7404 {
7405         int rc = 0;
7406
7407         switch (int_mode) {
7408         case INT_MODE_INTx:
7409         case INT_MODE_MSI:
7410                 bp->num_queues = 1;
7411                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7412                 break;
7413
7414         case INT_MODE_MSIX:
7415         default:
7416                 /* Set number of queues according to bp->multi_mode value */
7417                 bnx2x_set_num_queues_msix(bp);
7418
7419                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7420                    bp->num_queues);
7421
7422                 /* if we can't use MSI-X we only need one fp,
7423                  * so try to enable MSI-X with the requested number of fp's
7424                  * and fallback to MSI or legacy INTx with one fp
7425                  */
7426                 rc = bnx2x_enable_msix(bp);
7427                 if (rc)
7428                         /* failed to enable MSI-X */
7429                         bp->num_queues = 1;
7430                 break;
7431         }
7432         bp->dev->real_num_tx_queues = bp->num_queues;
7433         return rc;
7434 }
7435
7436 #ifdef BCM_CNIC
7437 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7438 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7439 #endif
7440
7441 /* must be called with rtnl_lock */
7442 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7443 {
7444         u32 load_code;
7445         int i, rc;
7446
7447 #ifdef BNX2X_STOP_ON_ERROR
7448         if (unlikely(bp->panic))
7449                 return -EPERM;
7450 #endif
7451
7452         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7453
7454         rc = bnx2x_set_num_queues(bp);
7455
7456         if (bnx2x_alloc_mem(bp)) {
7457                 bnx2x_free_irq(bp, true);
7458                 return -ENOMEM;
7459         }
7460
7461         for_each_queue(bp, i)
7462                 bnx2x_fp(bp, i, disable_tpa) =
7463                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7464
7465         for_each_queue(bp, i)
7466                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7467                                bnx2x_poll, 128);
7468
7469         bnx2x_napi_enable(bp);
7470
7471         if (bp->flags & USING_MSIX_FLAG) {
7472                 rc = bnx2x_req_msix_irqs(bp);
7473                 if (rc) {
7474                         bnx2x_free_irq(bp, true);
7475                         goto load_error1;
7476                 }
7477         } else {
7478                 /* Fall back to INTx if we failed to enable MSI-X due to
7479                    lack of memory (in bnx2x_set_num_queues()) */
7480                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7481                         bnx2x_enable_msi(bp);
7482                 bnx2x_ack_int(bp);
7483                 rc = bnx2x_req_irq(bp);
7484                 if (rc) {
7485                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7486                         bnx2x_free_irq(bp, true);
7487                         goto load_error1;
7488                 }
7489                 if (bp->flags & USING_MSI_FLAG) {
7490                         bp->dev->irq = bp->pdev->irq;
7491                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7492                                     bp->pdev->irq);
7493                 }
7494         }
7495
7496         /* Send LOAD_REQUEST command to MCP.
7497            Returns the type of LOAD command: if it is the first port
7498            to be initialized, common blocks should be initialized as
7499            well; otherwise they should not.
7500         */
7501         if (!BP_NOMCP(bp)) {
7502                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7503                 if (!load_code) {
7504                         BNX2X_ERR("MCP response failure, aborting\n");
7505                         rc = -EBUSY;
7506                         goto load_error2;
7507                 }
7508                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7509                         rc = -EBUSY; /* other port in diagnostic mode */
7510                         goto load_error2;
7511                 }
7512
7513         } else {
7514                 int port = BP_PORT(bp);
7515
7516                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7517                    load_count[0], load_count[1], load_count[2]);
7518                 load_count[0]++;
7519                 load_count[1 + port]++;
7520                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7521                    load_count[0], load_count[1], load_count[2]);
7522                 if (load_count[0] == 1)
7523                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7524                 else if (load_count[1 + port] == 1)
7525                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7526                 else
7527                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7528         }
7529
7530         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7531             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7532                 bp->port.pmf = 1;
7533         else
7534                 bp->port.pmf = 0;
7535         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7536
7537         /* Initialize HW */
7538         rc = bnx2x_init_hw(bp, load_code);
7539         if (rc) {
7540                 BNX2X_ERR("HW init failed, aborting\n");
7541                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7542                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7543                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7544                 goto load_error2;
7545         }
7546
7547         /* Setup NIC internals and enable interrupts */
7548         bnx2x_nic_init(bp, load_code);
7549
7550         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7551             (bp->common.shmem2_base))
7552                 SHMEM2_WR(bp, dcc_support,
7553                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7554                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7555
7556         /* Send LOAD_DONE command to MCP */
7557         if (!BP_NOMCP(bp)) {
7558                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7559                 if (!load_code) {
7560                         BNX2X_ERR("MCP response failure, aborting\n");
7561                         rc = -EBUSY;
7562                         goto load_error3;
7563                 }
7564         }
7565
7566         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7567
7568         rc = bnx2x_setup_leading(bp);
7569         if (rc) {
7570                 BNX2X_ERR("Setup leading failed!\n");
7571 #ifndef BNX2X_STOP_ON_ERROR
7572                 goto load_error3;
7573 #else
7574                 bp->panic = 1;
7575                 return -EBUSY;
7576 #endif
7577         }
7578
7579         if (CHIP_IS_E1H(bp))
7580                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7581                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7582                         bp->flags |= MF_FUNC_DIS;
7583                 }
7584
7585         if (bp->state == BNX2X_STATE_OPEN) {
7586 #ifdef BCM_CNIC
7587                 /* Enable Timer scan */
7588                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7589 #endif
7590                 for_each_nondefault_queue(bp, i) {
7591                         rc = bnx2x_setup_multi(bp, i);
7592                         if (rc)
7593 #ifdef BCM_CNIC
7594                                 goto load_error4;
7595 #else
7596                                 goto load_error3;
7597 #endif
7598                 }
7599
7600                 if (CHIP_IS_E1(bp))
7601                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7602                 else
7603                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7604 #ifdef BCM_CNIC
7605                 /* Set iSCSI L2 MAC */
7606                 mutex_lock(&bp->cnic_mutex);
7607                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7608                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7609                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7610                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7611                                       CNIC_SB_ID(bp));
7612                 }
7613                 mutex_unlock(&bp->cnic_mutex);
7614 #endif
7615         }
7616
7617         if (bp->port.pmf)
7618                 bnx2x_initial_phy_init(bp, load_mode);
7619
7620         /* Start fast path */
7621         switch (load_mode) {
7622         case LOAD_NORMAL:
7623                 if (bp->state == BNX2X_STATE_OPEN) {
7624                         /* Tx queue should only be re-enabled */
7625                         netif_tx_wake_all_queues(bp->dev);
7626                 }
7627                 /* Initialize the receive filter. */
7628                 bnx2x_set_rx_mode(bp->dev);
7629                 break;
7630
7631         case LOAD_OPEN:
7632                 netif_tx_start_all_queues(bp->dev);
7633                 if (bp->state != BNX2X_STATE_OPEN)
7634                         netif_tx_disable(bp->dev);
7635                 /* Initialize the receive filter. */
7636                 bnx2x_set_rx_mode(bp->dev);
7637                 break;
7638
7639         case LOAD_DIAG:
7640                 /* Initialize the receive filter. */
7641                 bnx2x_set_rx_mode(bp->dev);
7642                 bp->state = BNX2X_STATE_DIAG;
7643                 break;
7644
7645         default:
7646                 break;
7647         }
7648
7649         if (!bp->port.pmf)
7650                 bnx2x__link_status_update(bp);
7651
7652         /* start the timer */
7653         mod_timer(&bp->timer, jiffies + bp->current_interval);
7654
7655 #ifdef BCM_CNIC
7656         bnx2x_setup_cnic_irq_info(bp);
7657         if (bp->state == BNX2X_STATE_OPEN)
7658                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7659 #endif
7660
7661         return 0;
7662
7663 #ifdef BCM_CNIC
7664 load_error4:
7665         /* Disable Timer scan */
7666         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7667 #endif
7668 load_error3:
7669         bnx2x_int_disable_sync(bp, 1);
7670         if (!BP_NOMCP(bp)) {
7671                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7672                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7673         }
7674         bp->port.pmf = 0;
7675         /* Free SKBs, SGEs, TPA pool and driver internals */
7676         bnx2x_free_skbs(bp);
7677         for_each_queue(bp, i)
7678                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7679 load_error2:
7680         /* Release IRQs */
7681         bnx2x_free_irq(bp, false);
7682 load_error1:
7683         bnx2x_napi_disable(bp);
7684         for_each_queue(bp, i)
7685                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7686         bnx2x_free_mem(bp);
7687
7688         return rc;
7689 }
7690
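/*
 * Close a non-leading connection: send a HALT ramrod, wait for the HALTED
 * completion, then delete the CFC entry and wait for CLOSED.
 */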
7691 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7692 {
7693         struct bnx2x_fastpath *fp = &bp->fp[index];
7694         int rc;
7695
7696         /* halt the connection */
7697         fp->state = BNX2X_FP_STATE_HALTING;
7698         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7699
7700         /* Wait for completion */
7701         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7702                                &(fp->state), 1);
7703         if (rc) /* timeout */
7704                 return rc;
7705
7706         /* delete cfc entry */
7707         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7708
7709         /* Wait for completion */
7710         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7711                                &(fp->state), 1);
7712         return rc;
7713 }
7714
7715 static int bnx2x_stop_leading(struct bnx2x *bp)
7716 {
7717         __le16 dsb_sp_prod_idx;
7718         /* if the other port is handling traffic,
7719            this can take a lot of time */
7720         int cnt = 500;
7721         int rc;
7722
7723         might_sleep();
7724
7725         /* Send HALT ramrod */
7726         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7727         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7728
7729         /* Wait for completion */
7730         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7731                                &(bp->fp[0].state), 1);
7732         if (rc) /* timeout */
7733                 return rc;
7734
7735         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7736
7737         /* Send PORT_DELETE ramrod */
7738         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7739
7740         /* Wait for the completion to arrive on the default status
7741            block; we are going to reset the chip anyway, so there
7742            is not much to do if this times out
7743          */
7744         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7745                 if (!cnt) {
7746                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7747                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7748                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7749 #ifdef BNX2X_STOP_ON_ERROR
7750                         bnx2x_panic();
7751 #endif
7752                         rc = -EBUSY;
7753                         break;
7754                 }
7755                 cnt--;
7756                 msleep(1);
7757                 rmb(); /* Refresh the dsb_sp_prod */
7758         }
7759         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7760         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7761
7762         return rc;
7763 }
7764
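/*
 * Per-function reset: clear the HC leading/trailing edge registers, stop
 * the CNIC timer scan (waiting for it to finish) and wipe the function's
 * ILT entries.
 */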
7765 static void bnx2x_reset_func(struct bnx2x *bp)
7766 {
7767         int port = BP_PORT(bp);
7768         int func = BP_FUNC(bp);
7769         int base, i;
7770
7771         /* Configure IGU */
7772         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7773         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7774
7775 #ifdef BCM_CNIC
7776         /* Disable Timer scan */
7777         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7778         /*
7779          * Wait for at least 10ms and up to 2 seconds for the timers scan to
7780          * complete
7781          */
7782         for (i = 0; i < 200; i++) {
7783                 msleep(10);
7784                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7785                         break;
7786         }
7787 #endif
7788         /* Clear ILT */
7789         base = FUNC_ILT_BASE(func);
7790         for (i = base; i < base + ILT_PER_FUNC; i++)
7791                 bnx2x_ilt_wr(bp, i, 0);
7792 }
7793
7794 static void bnx2x_reset_port(struct bnx2x *bp)
7795 {
7796         int port = BP_PORT(bp);
7797         u32 val;
7798
7799         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7800
7801         /* Do not rcv packets to BRB */
7802         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7803         /* Do not direct rcv packets that are not for MCP to the BRB */
7804         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7805                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7806
7807         /* Configure AEU */
7808         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7809
7810         msleep(100);
7811         /* Check for BRB port occupancy */
7812         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7813         if (val)
7814                 DP(NETIF_MSG_IFDOWN,
7815                    "BRB1 is not empty  %d blocks are occupied\n", val);
7816
7817         /* TODO: Close Doorbell port? */
7818 }
7819
7820 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7821 {
7822         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7823            BP_FUNC(bp), reset_code);
7824
7825         switch (reset_code) {
7826         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7827                 bnx2x_reset_port(bp);
7828                 bnx2x_reset_func(bp);
7829                 bnx2x_reset_common(bp);
7830                 break;
7831
7832         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7833                 bnx2x_reset_port(bp);
7834                 bnx2x_reset_func(bp);
7835                 break;
7836
7837         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7838                 bnx2x_reset_func(bp);
7839                 break;
7840
7841         default:
7842                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7843                 break;
7844         }
7845 }
7846
7847 /* must be called with rtnl_lock */
7848 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7849 {
7850         int port = BP_PORT(bp);
7851         u32 reset_code = 0;
7852         int i, cnt, rc;
7853
7854 #ifdef BCM_CNIC
7855         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7856 #endif
7857         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7858
7859         /* Set "drop all" */
7860         bp->rx_mode = BNX2X_RX_MODE_NONE;
7861         bnx2x_set_storm_rx_mode(bp);
7862
7863         /* Disable HW interrupts, NAPI and Tx */
7864         bnx2x_netif_stop(bp, 1);
7865
7866         del_timer_sync(&bp->timer);
7867         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7868                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7869         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7870
7871         /* Release IRQs */
7872         bnx2x_free_irq(bp, false);
7873
7874         /* Wait until tx fastpath tasks complete */
7875         for_each_queue(bp, i) {
7876                 struct bnx2x_fastpath *fp = &bp->fp[i];
7877
7878                 cnt = 1000;
7879                 while (bnx2x_has_tx_work_unload(fp)) {
7880
7881                         bnx2x_tx_int(fp);
7882                         if (!cnt) {
7883                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7884                                           i);
7885 #ifdef BNX2X_STOP_ON_ERROR
7886                                 bnx2x_panic();
7887                                 return -EBUSY;
7888 #else
7889                                 break;
7890 #endif
7891                         }
7892                         cnt--;
7893                         msleep(1);
7894                 }
7895         }
7896         /* Give HW time to discard old tx messages */
7897         msleep(1);
7898
7899         if (CHIP_IS_E1(bp)) {
7900                 struct mac_configuration_cmd *config =
7901                                                 bnx2x_sp(bp, mcast_config);
7902
7903                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7904
7905                 for (i = 0; i < config->hdr.length; i++)
7906                         CAM_INVALIDATE(config->config_table[i]);
7907
7908                 config->hdr.length = i;
7909                 if (CHIP_REV_IS_SLOW(bp))
7910                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7911                 else
7912                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7913                 config->hdr.client_id = bp->fp->cl_id;
7914                 config->hdr.reserved1 = 0;
7915
7916                 bp->set_mac_pending++;
7917                 smp_wmb();
7918
7919                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7920                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7921                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7922
7923         } else { /* E1H */
7924                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7925
7926                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7927
7928                 for (i = 0; i < MC_HASH_SIZE; i++)
7929                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7930
7931                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7932         }
7933 #ifdef BCM_CNIC
7934         /* Clear iSCSI L2 MAC */
7935         mutex_lock(&bp->cnic_mutex);
7936         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7937                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7938                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7939         }
7940         mutex_unlock(&bp->cnic_mutex);
7941 #endif
7942
7943         if (unload_mode == UNLOAD_NORMAL)
7944                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7945
7946         else if (bp->flags & NO_WOL_FLAG)
7947                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7948
7949         else if (bp->wol) {
7950                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7951                 u8 *mac_addr = bp->dev->dev_addr;
7952                 u32 val;
7953                 /* The mac address is written to entries 1-4 to
7954                    preserve entry 0 which is used by the PMF */
7955                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7956
7957                 val = (mac_addr[0] << 8) | mac_addr[1];
7958                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7959
7960                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7961                       (mac_addr[4] << 8) | mac_addr[5];
7962                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7963
7964                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7965
7966         } else
7967                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7968
7969         /* Close multi and leading connections;
7970            completions for ramrods are collected in a synchronous way */
7971         for_each_nondefault_queue(bp, i)
7972                 if (bnx2x_stop_multi(bp, i))
7973                         goto unload_error;
7974
7975         rc = bnx2x_stop_leading(bp);
7976         if (rc) {
7977                 BNX2X_ERR("Stop leading failed!\n");
7978 #ifdef BNX2X_STOP_ON_ERROR
7979                 return -EBUSY;
7980 #else
7981                 goto unload_error;
7982 #endif
7983         }
7984
7985 unload_error:
7986         if (!BP_NOMCP(bp))
7987                 reset_code = bnx2x_fw_command(bp, reset_code);
7988         else {
7989                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7990                    load_count[0], load_count[1], load_count[2]);
7991                 load_count[0]--;
7992                 load_count[1 + port]--;
7993                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7994                    load_count[0], load_count[1], load_count[2]);
7995                 if (load_count[0] == 0)
7996                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7997                 else if (load_count[1 + port] == 0)
7998                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7999                 else
8000                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8001         }
8002
8003         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8004             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8005                 bnx2x__link_reset(bp);
8006
8007         /* Reset the chip */
8008         bnx2x_reset_chip(bp, reset_code);
8009
8010         /* Report UNLOAD_DONE to MCP */
8011         if (!BP_NOMCP(bp))
8012                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8013
8014         bp->port.pmf = 0;
8015
8016         /* Free SKBs, SGEs, TPA pool and driver internals */
8017         bnx2x_free_skbs(bp);
8018         for_each_queue(bp, i)
8019                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8020         for_each_queue(bp, i)
8021                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8022         bnx2x_free_mem(bp);
8023
8024         bp->state = BNX2X_STATE_CLOSED;
8025
8026         netif_carrier_off(bp->dev);
8027
8028         return 0;
8029 }
8030
8031 static void bnx2x_reset_task(struct work_struct *work)
8032 {
8033         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8034
8035 #ifdef BNX2X_STOP_ON_ERROR
8036         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
8037                   " so reset not done to allow debug dump;\n"
8038                   " you will need to reboot when done\n");
8039         return;
8040 #endif
8041
8042         rtnl_lock();
8043
8044         if (!netif_running(bp->dev))
8045                 goto reset_task_exit;
8046
8047         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8048         bnx2x_nic_load(bp, LOAD_NORMAL);
8049
8050 reset_task_exit:
8051         rtnl_unlock();
8052 }
8053
8054 /* end of nic load/unload */
8055
8056 /* ethtool_ops */
8057
8058 /*
8059  * Init service functions
8060  */
8061
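/* Map a function index to its PXP2 "pretend" register */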
8062 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8063 {
8064         switch (func) {
8065         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8066         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8067         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8068         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8069         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8070         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8071         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8072         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8073         default:
8074                 BNX2X_ERR("Unsupported function index: %d\n", func);
8075                 return (u32)(-1);
8076         }
8077 }
8078
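/*
 * Disable HC interrupts while pretending (via the PXP2 pretend register)
 * to be function 0, then restore the original function id.  Only used in
 * the UNDI unload path below.
 */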
8079 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8080 {
8081         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8082
8083         /* Flush all outstanding writes */
8084         mmiowb();
8085
8086         /* Pretend to be function 0 */
8087         REG_WR(bp, reg, 0);
8088         /* Flush the GRC transaction (in the chip) */
8089         new_val = REG_RD(bp, reg);
8090         if (new_val != 0) {
8091                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8092                           new_val);
8093                 BUG();
8094         }
8095
8096         /* From now we are in the "like-E1" mode */
8097         bnx2x_int_disable(bp);
8098
8099         /* Flush all outstanding writes */
8100         mmiowb();
8101
8102         /* Restore the original function settings */
8103         REG_WR(bp, reg, orig_func);
8104         new_val = REG_RD(bp, reg);
8105         if (new_val != orig_func) {
8106                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8107                           orig_func, new_val);
8108                 BUG();
8109         }
8110 }
8111
8112 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8113 {
8114         if (CHIP_IS_E1H(bp))
8115                 bnx2x_undi_int_disable_e1h(bp, func);
8116         else
8117                 bnx2x_int_disable(bp);
8118 }
8119
8120 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8121 {
8122         u32 val;
8123
8124         /* Check if there is any driver already loaded */
8125         val = REG_RD(bp, MISC_REG_UNPREPARED);
8126         if (val == 0x1) {
8127                 /* Check if it is the UNDI driver
8128                  * UNDI driver initializes CID offset for normal bell to 0x7
8129                  */
8130                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8131                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8132                 if (val == 0x7) {
8133                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8134                         /* save our func */
8135                         int func = BP_FUNC(bp);
8136                         u32 swap_en;
8137                         u32 swap_val;
8138
8139                         /* clear the UNDI indication */
8140                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8141
8142                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8143
8144                         /* try unload UNDI on port 0 */
8145                         bp->func = 0;
8146                         bp->fw_seq =
8147                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8148                                 DRV_MSG_SEQ_NUMBER_MASK);
8149                         reset_code = bnx2x_fw_command(bp, reset_code);
8150
8151                         /* if UNDI is loaded on the other port */
8152                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8153
8154                                 /* send "DONE" for previous unload */
8155                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8156
8157                                 /* unload UNDI on port 1 */
8158                                 bp->func = 1;
8159                                 bp->fw_seq =
8160                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8161                                         DRV_MSG_SEQ_NUMBER_MASK);
8162                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8163
8164                                 bnx2x_fw_command(bp, reset_code);
8165                         }
8166
8167                         /* now it's safe to release the lock */
8168                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8169
8170                         bnx2x_undi_int_disable(bp, func);
8171
8172                         /* close input traffic and wait for it */
8173                         /* Do not rcv packets to BRB */
8174                         REG_WR(bp,
8175                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8176                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8177                         /* Do not direct rcv packets that are not for MCP to
8178                          * the BRB */
8179                         REG_WR(bp,
8180                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8181                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8182                         /* clear AEU */
8183                         REG_WR(bp,
8184                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8185                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8186                         msleep(10);
8187
8188                         /* save NIG port swap info */
8189                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8190                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8191                         /* reset device */
8192                         REG_WR(bp,
8193                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8194                                0xd3ffffff);
8195                         REG_WR(bp,
8196                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8197                                0x1403);
8198                         /* take the NIG out of reset and restore swap values */
8199                         REG_WR(bp,
8200                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8201                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8202                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8203                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8204
8205                         /* send unload done to the MCP */
8206                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8207
8208                         /* restore our func and fw_seq */
8209                         bp->func = func;
8210                         bp->fw_seq =
8211                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8212                                 DRV_MSG_SEQ_NUMBER_MASK);
8213
8214                 } else
8215                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8216         }
8217 }
8218
8219 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8220 {
8221         u32 val, val2, val3, val4, id;
8222         u16 pmc;
8223
8224         /* Get the chip revision id and number. */
8225         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8226         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8227         id = ((val & 0xffff) << 16);
8228         val = REG_RD(bp, MISC_REG_CHIP_REV);
8229         id |= ((val & 0xf) << 12);
8230         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8231         id |= ((val & 0xff) << 4);
8232         val = REG_RD(bp, MISC_REG_BOND_ID);
8233         id |= (val & 0xf);
8234         bp->common.chip_id = id;
8235         bp->link_params.chip_id = bp->common.chip_id;
8236         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
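        /* worked example (hypothetical values): chip num 0x5711, rev 0x1,
         * metal 0x00 and bond_id 0x0 pack into chip_id 0x57111000 */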
8237
8238         val = (REG_RD(bp, 0x2874) & 0x55);
8239         if ((bp->common.chip_id & 0x1) ||
8240             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8241                 bp->flags |= ONE_PORT_FLAG;
8242                 BNX2X_DEV_INFO("single port device\n");
8243         }
8244
8245         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8246         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8247                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8248         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8249                        bp->common.flash_size, bp->common.flash_size);
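        /* the strap value selects a power-of-two multiple of the base
         * NVRAM size, so each increment doubles the reported flash size */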
8250
8251         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8252         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8253         bp->link_params.shmem_base = bp->common.shmem_base;
8254         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8255                        bp->common.shmem_base, bp->common.shmem2_base);
8256
8257         if (!bp->common.shmem_base ||
8258             (bp->common.shmem_base < 0xA0000) ||
8259             (bp->common.shmem_base >= 0xC0000)) {
8260                 BNX2X_DEV_INFO("MCP not active\n");
8261                 bp->flags |= NO_MCP_FLAG;
8262                 return;
8263         }
8264
8265         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8266         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8267                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8268                 BNX2X_ERR("BAD MCP validity signature\n");
8269
8270         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8271         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8272
8273         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8274                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8275                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8276
8277         bp->link_params.feature_config_flags = 0;
8278         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8279         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8280                 bp->link_params.feature_config_flags |=
8281                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8282         else
8283                 bp->link_params.feature_config_flags &=
8284                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8285
8286         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8287         bp->common.bc_ver = val;
8288         BNX2X_DEV_INFO("bc_ver %X\n", val);
8289         if (val < BNX2X_BC_VER) {
8290                 /* for now only warn;
8291                  * later we may need to enforce this */
8292                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8293                           " please upgrade BC\n", BNX2X_BC_VER, val);
8294         }
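        /* bootcode at or above REQ_BC_VER_4_VRFY_OPT_MDL can verify the
         * optic module; record that capability for the link code */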
8295         bp->link_params.feature_config_flags |=
8296                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8297                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8298
8299         if (BP_E1HVN(bp) == 0) {
8300                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8301                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8302         } else {
8303                 /* no WOL capability for E1HVN != 0 */
8304                 bp->flags |= NO_WOL_FLAG;
8305         }
8306         BNX2X_DEV_INFO("%sWoL capable\n",
8307                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8308
8309         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8310         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8311         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8312         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8313
8314         pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
8315 }
8316
8317 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8318                                                     u32 switch_cfg)
8319 {
8320         int port = BP_PORT(bp);
8321         u32 ext_phy_type;
8322
8323         switch (switch_cfg) {
8324         case SWITCH_CFG_1G:
8325                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8326
8327                 ext_phy_type =
8328                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8329                 switch (ext_phy_type) {
8330                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8331                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8332                                        ext_phy_type);
8333
8334                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8335                                                SUPPORTED_10baseT_Full |
8336                                                SUPPORTED_100baseT_Half |
8337                                                SUPPORTED_100baseT_Full |
8338                                                SUPPORTED_1000baseT_Full |
8339                                                SUPPORTED_2500baseX_Full |
8340                                                SUPPORTED_TP |
8341                                                SUPPORTED_FIBRE |
8342                                                SUPPORTED_Autoneg |
8343                                                SUPPORTED_Pause |
8344                                                SUPPORTED_Asym_Pause);
8345                         break;
8346
8347                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8348                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8349                                        ext_phy_type);
8350
8351                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8352                                                SUPPORTED_10baseT_Full |
8353                                                SUPPORTED_100baseT_Half |
8354                                                SUPPORTED_100baseT_Full |
8355                                                SUPPORTED_1000baseT_Full |
8356                                                SUPPORTED_TP |
8357                                                SUPPORTED_FIBRE |
8358                                                SUPPORTED_Autoneg |
8359                                                SUPPORTED_Pause |
8360                                                SUPPORTED_Asym_Pause);
8361                         break;
8362
8363                 default:
8364                         BNX2X_ERR("NVRAM config error. "
8365                                   "BAD SerDes ext_phy_config 0x%x\n",
8366                                   bp->link_params.ext_phy_config);
8367                         return;
8368                 }
8369
8370                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8371                                            port*0x10);
8372                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8373                 break;
8374
8375         case SWITCH_CFG_10G:
8376                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8377
8378                 ext_phy_type =
8379                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8380                 switch (ext_phy_type) {
8381                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8382                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8383                                        ext_phy_type);
8384
8385                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8386                                                SUPPORTED_10baseT_Full |
8387                                                SUPPORTED_100baseT_Half |
8388                                                SUPPORTED_100baseT_Full |
8389                                                SUPPORTED_1000baseT_Full |
8390                                                SUPPORTED_2500baseX_Full |
8391                                                SUPPORTED_10000baseT_Full |
8392                                                SUPPORTED_TP |
8393                                                SUPPORTED_FIBRE |
8394                                                SUPPORTED_Autoneg |
8395                                                SUPPORTED_Pause |
8396                                                SUPPORTED_Asym_Pause);
8397                         break;
8398
8399                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8400                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8401                                        ext_phy_type);
8402
8403                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8404                                                SUPPORTED_1000baseT_Full |
8405                                                SUPPORTED_FIBRE |
8406                                                SUPPORTED_Autoneg |
8407                                                SUPPORTED_Pause |
8408                                                SUPPORTED_Asym_Pause);
8409                         break;
8410
8411                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8412                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8413                                        ext_phy_type);
8414
8415                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8416                                                SUPPORTED_2500baseX_Full |
8417                                                SUPPORTED_1000baseT_Full |
8418                                                SUPPORTED_FIBRE |
8419                                                SUPPORTED_Autoneg |
8420                                                SUPPORTED_Pause |
8421                                                SUPPORTED_Asym_Pause);
8422                         break;
8423
8424                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8425                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8426                                        ext_phy_type);
8427
8428                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8429                                                SUPPORTED_FIBRE |
8430                                                SUPPORTED_Pause |
8431                                                SUPPORTED_Asym_Pause);
8432                         break;
8433
8434                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8435                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8436                                        ext_phy_type);
8437
8438                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8439                                                SUPPORTED_1000baseT_Full |
8440                                                SUPPORTED_FIBRE |
8441                                                SUPPORTED_Pause |
8442                                                SUPPORTED_Asym_Pause);
8443                         break;
8444
8445                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8446                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8447                                        ext_phy_type);
8448
8449                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8450                                                SUPPORTED_1000baseT_Full |
8451                                                SUPPORTED_Autoneg |
8452                                                SUPPORTED_FIBRE |
8453                                                SUPPORTED_Pause |
8454                                                SUPPORTED_Asym_Pause);
8455                         break;
8456
8457                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8458                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8459                                        ext_phy_type);
8460
8461                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8462                                                SUPPORTED_1000baseT_Full |
8463                                                SUPPORTED_Autoneg |
8464                                                SUPPORTED_FIBRE |
8465                                                SUPPORTED_Pause |
8466                                                SUPPORTED_Asym_Pause);
8467                         break;
8468
8469                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8470                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8471                                        ext_phy_type);
8472
8473                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8474                                                SUPPORTED_TP |
8475                                                SUPPORTED_Autoneg |
8476                                                SUPPORTED_Pause |
8477                                                SUPPORTED_Asym_Pause);
8478                         break;
8479
8480                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8481                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8482                                        ext_phy_type);
8483
8484                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8485                                                SUPPORTED_10baseT_Full |
8486                                                SUPPORTED_100baseT_Half |
8487                                                SUPPORTED_100baseT_Full |
8488                                                SUPPORTED_1000baseT_Full |
8489                                                SUPPORTED_10000baseT_Full |
8490                                                SUPPORTED_TP |
8491                                                SUPPORTED_Autoneg |
8492                                                SUPPORTED_Pause |
8493                                                SUPPORTED_Asym_Pause);
8494                         break;
8495
8496                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8497                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8498                                   bp->link_params.ext_phy_config);
8499                         break;
8500
8501                 default:
8502                         BNX2X_ERR("NVRAM config error. "
8503                                   "BAD XGXS ext_phy_config 0x%x\n",
8504                                   bp->link_params.ext_phy_config);
8505                         return;
8506                 }
8507
8508                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8509                                            port*0x18);
8510                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8511
8512                 break;
8513
8514         default:
8515                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8516                           bp->port.link_config);
8517                 return;
8518         }
8519         bp->link_params.phy_addr = bp->port.phy_addr;
8520
8521         /* mask what we support according to speed_cap_mask */
8522         if (!(bp->link_params.speed_cap_mask &
8523                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8524                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8525
8526         if (!(bp->link_params.speed_cap_mask &
8527                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8528                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8529
8530         if (!(bp->link_params.speed_cap_mask &
8531                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8532                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8533
8534         if (!(bp->link_params.speed_cap_mask &
8535                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8536                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8537
8538         if (!(bp->link_params.speed_cap_mask &
8539                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8540                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8541                                         SUPPORTED_1000baseT_Full);
8542
8543         if (!(bp->link_params.speed_cap_mask &
8544                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8545                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8546
8547         if (!(bp->link_params.speed_cap_mask &
8548                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8549                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8550
8551         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8552 }
8553
8554 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8555 {
8556         bp->link_params.req_duplex = DUPLEX_FULL;
8557
8558         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8559         case PORT_FEATURE_LINK_SPEED_AUTO:
8560                 if (bp->port.supported & SUPPORTED_Autoneg) {
8561                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8562                         bp->port.advertising = bp->port.supported;
8563                 } else {
8564                         u32 ext_phy_type =
8565                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8566
8567                         if ((ext_phy_type ==
8568                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8569                             (ext_phy_type ==
8570                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8571                                 /* force 10G, no AN */
8572                                 bp->link_params.req_line_speed = SPEED_10000;
8573                                 bp->port.advertising =
8574                                                 (ADVERTISED_10000baseT_Full |
8575                                                  ADVERTISED_FIBRE);
8576                                 break;
8577                         }
8578                         BNX2X_ERR("NVRAM config error. "
8579                                   "Invalid link_config 0x%x"
8580                                   "  Autoneg not supported\n",
8581                                   bp->port.link_config);
8582                         return;
8583                 }
8584                 break;
8585
8586         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8587                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8588                         bp->link_params.req_line_speed = SPEED_10;
8589                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8590                                                 ADVERTISED_TP);
8591                 } else {
8592                         BNX2X_ERR("NVRAM config error. "
8593                                   "Invalid link_config 0x%x"
8594                                   "  speed_cap_mask 0x%x\n",
8595                                   bp->port.link_config,
8596                                   bp->link_params.speed_cap_mask);
8597                         return;
8598                 }
8599                 break;
8600
8601         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8602                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8603                         bp->link_params.req_line_speed = SPEED_10;
8604                         bp->link_params.req_duplex = DUPLEX_HALF;
8605                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8606                                                 ADVERTISED_TP);
8607                 } else {
8608                         BNX2X_ERR("NVRAM config error. "
8609                                   "Invalid link_config 0x%x"
8610                                   "  speed_cap_mask 0x%x\n",
8611                                   bp->port.link_config,
8612                                   bp->link_params.speed_cap_mask);
8613                         return;
8614                 }
8615                 break;
8616
8617         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8618                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8619                         bp->link_params.req_line_speed = SPEED_100;
8620                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8621                                                 ADVERTISED_TP);
8622                 } else {
8623                         BNX2X_ERR("NVRAM config error. "
8624                                   "Invalid link_config 0x%x"
8625                                   "  speed_cap_mask 0x%x\n",
8626                                   bp->port.link_config,
8627                                   bp->link_params.speed_cap_mask);
8628                         return;
8629                 }
8630                 break;
8631
8632         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8633                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8634                         bp->link_params.req_line_speed = SPEED_100;
8635                         bp->link_params.req_duplex = DUPLEX_HALF;
8636                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8637                                                 ADVERTISED_TP);
8638                 } else {
8639                         BNX2X_ERR("NVRAM config error. "
8640                                   "Invalid link_config 0x%x"
8641                                   "  speed_cap_mask 0x%x\n",
8642                                   bp->port.link_config,
8643                                   bp->link_params.speed_cap_mask);
8644                         return;
8645                 }
8646                 break;
8647
8648         case PORT_FEATURE_LINK_SPEED_1G:
8649                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8650                         bp->link_params.req_line_speed = SPEED_1000;
8651                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8652                                                 ADVERTISED_TP);
8653                 } else {
8654                         BNX2X_ERR("NVRAM config error. "
8655                                   "Invalid link_config 0x%x"
8656                                   "  speed_cap_mask 0x%x\n",
8657                                   bp->port.link_config,
8658                                   bp->link_params.speed_cap_mask);
8659                         return;
8660                 }
8661                 break;
8662
8663         case PORT_FEATURE_LINK_SPEED_2_5G:
8664                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8665                         bp->link_params.req_line_speed = SPEED_2500;
8666                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8667                                                 ADVERTISED_TP);
8668                 } else {
8669                         BNX2X_ERR("NVRAM config error. "
8670                                   "Invalid link_config 0x%x"
8671                                   "  speed_cap_mask 0x%x\n",
8672                                   bp->port.link_config,
8673                                   bp->link_params.speed_cap_mask);
8674                         return;
8675                 }
8676                 break;
8677
8678         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8679         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8680         case PORT_FEATURE_LINK_SPEED_10G_KR:
8681                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8682                         bp->link_params.req_line_speed = SPEED_10000;
8683                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8684                                                 ADVERTISED_FIBRE);
8685                 } else {
8686                         BNX2X_ERR("NVRAM config error. "
8687                                   "Invalid link_config 0x%x"
8688                                   "  speed_cap_mask 0x%x\n",
8689                                   bp->port.link_config,
8690                                   bp->link_params.speed_cap_mask);
8691                         return;
8692                 }
8693                 break;
8694
8695         default:
8696                 BNX2X_ERR("NVRAM config error. "
8697                           "BAD link speed link_config 0x%x\n",
8698                           bp->port.link_config);
8699                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8700                 bp->port.advertising = bp->port.supported;
8701                 break;
8702         }
8703
8704         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8705                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8706         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8707             !(bp->port.supported & SUPPORTED_Autoneg))
8708                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8709
8710         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8711                        "  advertising 0x%x\n",
8712                        bp->link_params.req_line_speed,
8713                        bp->link_params.req_duplex,
8714                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8715 }
8716
8717 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8718 {
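        /* a worked example (hypothetical values): mac_hi = 0x0010 and
         * mac_lo = 0x18a1b2c3 yield mac_buf[] = 00:10:18:a1:b2:c3,
         * i.e. the address is laid out in network (big-endian) order */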
8719         mac_hi = cpu_to_be16(mac_hi);
8720         mac_lo = cpu_to_be32(mac_lo);
8721         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8722         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8723 }
8724
8725 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8726 {
8727         int port = BP_PORT(bp);
8728         u32 val, val2;
8729         u32 config;
8730         u16 i;
8731         u32 ext_phy_type;
8732
8733         bp->link_params.bp = bp;
8734         bp->link_params.port = port;
8735
8736         bp->link_params.lane_config =
8737                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8738         bp->link_params.ext_phy_config =
8739                 SHMEM_RD(bp,
8740                          dev_info.port_hw_config[port].external_phy_config);
8741         /* BCM8727_NOC => BCM8727 with no over-current detection */
8742         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8743             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8744                 bp->link_params.ext_phy_config &=
8745                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8746                 bp->link_params.ext_phy_config |=
8747                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8748                 bp->link_params.feature_config_flags |=
8749                         FEATURE_CONFIG_BCM8727_NOC;
8750         }
8751
8752         bp->link_params.speed_cap_mask =
8753                 SHMEM_RD(bp,
8754                          dev_info.port_hw_config[port].speed_capability_mask);
8755
8756         bp->port.link_config =
8757                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8758
8759         /* Get the RX and TX XGXS config for all 4 lanes */
8760         for (i = 0; i < 2; i++) {
8761                 val = SHMEM_RD(bp,
8762                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8763                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8764                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8765
8766                 val = SHMEM_RD(bp,
8767                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8768                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8769                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8770         }
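        /* note: each shmem dword above packs two lanes: the high 16 bits
         * hold the even lane's value, the low 16 bits the odd lane's */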
8771
8772         /* If the device is capable of WoL, set the default state according
8773          * to the HW
8774          */
8775         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8776         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8777                    (config & PORT_FEATURE_WOL_ENABLED));
8778
8779         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8780                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8781                        bp->link_params.lane_config,
8782                        bp->link_params.ext_phy_config,
8783                        bp->link_params.speed_cap_mask, bp->port.link_config);
8784
8785         bp->link_params.switch_cfg |= (bp->port.link_config &
8786                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8787         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8788
8789         bnx2x_link_settings_requested(bp);
8790
8791         /*
8792          * If connected directly, work with the internal PHY, otherwise, work
8793          * with the external PHY
8794          */
8795         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8796         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8797                 bp->mdio.prtad = bp->link_params.phy_addr;
8798
8799         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8800                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8801                 bp->mdio.prtad =
8802                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8803
8804         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8805         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8806         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8807         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8808         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8809
8810 #ifdef BCM_CNIC
8811         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8812         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8813         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8814 #endif
8815 }
8816
8817 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8818 {
8819         int func = BP_FUNC(bp);
8820         u32 val, val2;
8821         int rc = 0;
8822
8823         bnx2x_get_common_hwinfo(bp);
8824
8825         bp->e1hov = 0;
8826         bp->e1hmf = 0;
8827         if (CHIP_IS_E1H(bp)) {
8828                 bp->mf_config =
8829                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8830
8831                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8832                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8833                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8834                         bp->e1hmf = 1;
8835                 BNX2X_DEV_INFO("%s function mode\n",
8836                                IS_E1HMF(bp) ? "multi" : "single");
8837
8838                 if (IS_E1HMF(bp)) {
8839                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8840                                                                 e1hov_tag) &
8841                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8842                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8843                                 bp->e1hov = val;
8844                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8845                                                "(0x%04x)\n",
8846                                                func, bp->e1hov, bp->e1hov);
8847                         } else {
8848                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8849                                           "  aborting\n", func);
8850                                 rc = -EPERM;
8851                         }
8852                 } else {
8853                         if (BP_E1HVN(bp)) {
8854                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8855                                           "  aborting\n", BP_E1HVN(bp));
8856                                 rc = -EPERM;
8857                         }
8858                 }
8859         }
8860
8861         if (!BP_NOMCP(bp)) {
8862                 bnx2x_get_port_hwinfo(bp);
8863
8864                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8865                               DRV_MSG_SEQ_NUMBER_MASK);
8866                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8867         }
8868
8869         if (IS_E1HMF(bp)) {
8870                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8871                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8872                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8873                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8874                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8875                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8876                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8877                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8878                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8879                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8880                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8881                                ETH_ALEN);
8882                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8883                                ETH_ALEN);
8884                 }
8885
8886                 return rc;
8887         }
8888
8889         if (BP_NOMCP(bp)) {
8890                 /* only supposed to happen on emulation/FPGA */
8891                 BNX2X_ERR("warning: random MAC workaround active\n");
8892                 random_ether_addr(bp->dev->dev_addr);
8893                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8894         }
8895
8896         return rc;
8897 }
8898
8899 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8900 {
8901         int func = BP_FUNC(bp);
8902         int timer_interval;
8903         int rc;
8904
8905         /* Disable interrupt handling until HW is initialized */
8906         atomic_set(&bp->intr_sem, 1);
8907         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8908
8909         mutex_init(&bp->port.phy_mutex);
8910         mutex_init(&bp->fw_mb_mutex);
8911 #ifdef BCM_CNIC
8912         mutex_init(&bp->cnic_mutex);
8913 #endif
8914
8915         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8916         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8917
8918         rc = bnx2x_get_hwinfo(bp);
8919
8920         /* need to reset the chip if UNDI was active */
8921         if (!BP_NOMCP(bp))
8922                 bnx2x_undi_unload(bp);
8923
8924         if (CHIP_REV_IS_FPGA(bp))
8925                 pr_err("FPGA detected\n");
8926
8927         if (BP_NOMCP(bp) && (func == 0))
8928                 pr_err("MCP disabled, must load devices in order!\n");
8929
8930         /* Set multi queue mode */
8931         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8932             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8933                 pr_err("Multi disabled since the requested int_mode is not MSI-X\n");
8934                 multi_mode = ETH_RSS_MODE_DISABLED;
8935         }
8936         bp->multi_mode = multi_mode;
8937
8938
8939         /* Set TPA flags */
8940         if (disable_tpa) {
8941                 bp->flags &= ~TPA_ENABLE_FLAG;
8942                 bp->dev->features &= ~NETIF_F_LRO;
8943         } else {
8944                 bp->flags |= TPA_ENABLE_FLAG;
8945                 bp->dev->features |= NETIF_F_LRO;
8946         }
8947
8948         if (CHIP_IS_E1(bp))
8949                 bp->dropless_fc = 0;
8950         else
8951                 bp->dropless_fc = dropless_fc;
8952
8953         bp->mrrs = mrrs;
8954
8955         bp->tx_ring_size = MAX_TX_AVAIL;
8956         bp->rx_ring_size = MAX_RX_AVAIL;
8957
8958         bp->rx_csum = 1;
8959
8960         /* round the coalescing timeouts down to the 4 * BNX2X_BTR granularity */
8961         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8962         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
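        /* e.g. with a (hypothetical) granularity of 4 * BNX2X_BTR = 12,
         * the defaults above become tx_ticks = 48 and rx_ticks = 24 */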
8963
8964         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8965         bp->current_interval = (poll ? poll : timer_interval);
8966
8967         init_timer(&bp->timer);
8968         bp->timer.expires = jiffies + bp->current_interval;
8969         bp->timer.data = (unsigned long) bp;
8970         bp->timer.function = bnx2x_timer;
8971
8972         return rc;
8973 }
8974
8975 /*
8976  * ethtool service functions
8977  */
8978
8979 /* All ethtool functions are called with rtnl_lock held */
8980
8981 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8982 {
8983         struct bnx2x *bp = netdev_priv(dev);
8984
8985         cmd->supported = bp->port.supported;
8986         cmd->advertising = bp->port.advertising;
8987
8988         if ((bp->state == BNX2X_STATE_OPEN) &&
8989             !(bp->flags & MF_FUNC_DIS) &&
8990             (bp->link_vars.link_up)) {
8991                 cmd->speed = bp->link_vars.line_speed;
8992                 cmd->duplex = bp->link_vars.duplex;
8993                 if (IS_E1HMF(bp)) {
8994                         u16 vn_max_rate;
8995
8996                         vn_max_rate =
8997                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8998                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
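                        /* FUNC_MF_CFG_MAX_BW is in units of 100 Mbps
                         * (hence the * 100): e.g. a field value of 100
                         * caps the reported speed at 10000 (10G) */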
8999                         if (vn_max_rate < cmd->speed)
9000                                 cmd->speed = vn_max_rate;
9001                 }
9002         } else {
9003                 cmd->speed = -1;
9004                 cmd->duplex = -1;
9005         }
9006
9007         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9008                 u32 ext_phy_type =
9009                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9010
9011                 switch (ext_phy_type) {
9012                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9013                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9014                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9015                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9016                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9017                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9018                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9019                         cmd->port = PORT_FIBRE;
9020                         break;
9021
9022                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9023                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9024                         cmd->port = PORT_TP;
9025                         break;
9026
9027                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9028                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9029                                   bp->link_params.ext_phy_config);
9030                         break;
9031
9032                 default:
9033                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9034                            bp->link_params.ext_phy_config);
9035                         break;
9036                 }
9037         } else
9038                 cmd->port = PORT_TP;
9039
9040         cmd->phy_address = bp->mdio.prtad;
9041         cmd->transceiver = XCVR_INTERNAL;
9042
9043         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9044                 cmd->autoneg = AUTONEG_ENABLE;
9045         else
9046                 cmd->autoneg = AUTONEG_DISABLE;
9047
9048         cmd->maxtxpkt = 0;
9049         cmd->maxrxpkt = 0;
9050
9051         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9052            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9053            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9054            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9055            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9056            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9057            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9058
9059         return 0;
9060 }
9061
9062 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9063 {
9064         struct bnx2x *bp = netdev_priv(dev);
9065         u32 advertising;
9066
9067         if (IS_E1HMF(bp))
9068                 return 0;
9069
9070         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9071            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9072            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9073            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9074            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9075            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9076            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9077
9078         if (cmd->autoneg == AUTONEG_ENABLE) {
9079                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9080                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9081                         return -EINVAL;
9082                 }
9083
9084                 /* advertise the requested speed and duplex if supported */
9085                 cmd->advertising &= bp->port.supported;
9086
9087                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9088                 bp->link_params.req_duplex = DUPLEX_FULL;
9089                 bp->port.advertising |= (ADVERTISED_Autoneg |
9090                                          cmd->advertising);
9091
9092         } else { /* forced speed */
9093                 /* advertise the requested speed and duplex if supported */
9094                 switch (cmd->speed) {
9095                 case SPEED_10:
9096                         if (cmd->duplex == DUPLEX_FULL) {
9097                                 if (!(bp->port.supported &
9098                                       SUPPORTED_10baseT_Full)) {
9099                                         DP(NETIF_MSG_LINK,
9100                                            "10M full not supported\n");
9101                                         return -EINVAL;
9102                                 }
9103
9104                                 advertising = (ADVERTISED_10baseT_Full |
9105                                                ADVERTISED_TP);
9106                         } else {
9107                                 if (!(bp->port.supported &
9108                                       SUPPORTED_10baseT_Half)) {
9109                                         DP(NETIF_MSG_LINK,
9110                                            "10M half not supported\n");
9111                                         return -EINVAL;
9112                                 }
9113
9114                                 advertising = (ADVERTISED_10baseT_Half |
9115                                                ADVERTISED_TP);
9116                         }
9117                         break;
9118
9119                 case SPEED_100:
9120                         if (cmd->duplex == DUPLEX_FULL) {
9121                                 if (!(bp->port.supported &
9122                                                 SUPPORTED_100baseT_Full)) {
9123                                         DP(NETIF_MSG_LINK,
9124                                            "100M full not supported\n");
9125                                         return -EINVAL;
9126                                 }
9127
9128                                 advertising = (ADVERTISED_100baseT_Full |
9129                                                ADVERTISED_TP);
9130                         } else {
9131                                 if (!(bp->port.supported &
9132                                                 SUPPORTED_100baseT_Half)) {
9133                                         DP(NETIF_MSG_LINK,
9134                                            "100M half not supported\n");
9135                                         return -EINVAL;
9136                                 }
9137
9138                                 advertising = (ADVERTISED_100baseT_Half |
9139                                                ADVERTISED_TP);
9140                         }
9141                         break;
9142
9143                 case SPEED_1000:
9144                         if (cmd->duplex != DUPLEX_FULL) {
9145                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9146                                 return -EINVAL;
9147                         }
9148
9149                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9150                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9151                                 return -EINVAL;
9152                         }
9153
9154                         advertising = (ADVERTISED_1000baseT_Full |
9155                                        ADVERTISED_TP);
9156                         break;
9157
9158                 case SPEED_2500:
9159                         if (cmd->duplex != DUPLEX_FULL) {
9160                                 DP(NETIF_MSG_LINK,
9161                                    "2.5G half not supported\n");
9162                                 return -EINVAL;
9163                         }
9164
9165                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9166                                 DP(NETIF_MSG_LINK,
9167                                    "2.5G full not supported\n");
9168                                 return -EINVAL;
9169                         }
9170
9171                         advertising = (ADVERTISED_2500baseX_Full |
9172                                        ADVERTISED_TP);
9173                         break;
9174
9175                 case SPEED_10000:
9176                         if (cmd->duplex != DUPLEX_FULL) {
9177                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9178                                 return -EINVAL;
9179                         }
9180
9181                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9182                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9183                                 return -EINVAL;
9184                         }
9185
9186                         advertising = (ADVERTISED_10000baseT_Full |
9187                                        ADVERTISED_FIBRE);
9188                         break;
9189
9190                 default:
9191                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9192                         return -EINVAL;
9193                 }
9194
9195                 bp->link_params.req_line_speed = cmd->speed;
9196                 bp->link_params.req_duplex = cmd->duplex;
9197                 bp->port.advertising = advertising;
9198         }
9199
9200         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9201            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9202            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9203            bp->port.advertising);
9204
9205         if (netif_running(dev)) {
9206                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9207                 bnx2x_link_set(bp);
9208         }
9209
9210         return 0;
9211 }
9212
9213 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9214 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9215
9216 static int bnx2x_get_regs_len(struct net_device *dev)
9217 {
9218         struct bnx2x *bp = netdev_priv(dev);
9219         int regdump_len = 0;
9220         int i;
9221
9222         if (CHIP_IS_E1(bp)) {
9223                 for (i = 0; i < REGS_COUNT; i++)
9224                         if (IS_E1_ONLINE(reg_addrs[i].info))
9225                                 regdump_len += reg_addrs[i].size;
9226
9227                 for (i = 0; i < WREGS_COUNT_E1; i++)
9228                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9229                                 regdump_len += wreg_addrs_e1[i].size *
9230                                         (1 + wreg_addrs_e1[i].read_regs_count);
9231
9232         } else { /* E1H */
9233                 for (i = 0; i < REGS_COUNT; i++)
9234                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9235                                 regdump_len += reg_addrs[i].size;
9236
9237                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9238                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9239                                 regdump_len += wreg_addrs_e1h[i].size *
9240                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9241         }
9242         regdump_len *= 4;
9243         regdump_len += sizeof(struct dump_hdr);
9244
9245         return regdump_len;
9246 }
9247
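/* Writes the dump whose length bnx2x_get_regs_len() computed above:
 * a dump_hdr followed by the raw dwords of every register block that
 * is on-line for the running chip revision. */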
9248 static void bnx2x_get_regs(struct net_device *dev,
9249                            struct ethtool_regs *regs, void *_p)
9250 {
9251         u32 *p = _p, i, j;
9252         struct bnx2x *bp = netdev_priv(dev);
9253         struct dump_hdr dump_hdr = {0};
9254
9255         regs->version = 0;
9256         memset(p, 0, regs->len);
9257
9258         if (!netif_running(bp->dev))
9259                 return;
9260
9261         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9262         dump_hdr.dump_sign = dump_sign_all;
9263         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9264         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9265         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9266         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9267         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9268
9269         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
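        /* hdr_size counts the header dwords minus one, so advancing
         * hdr_size + 1 dwords steps over the whole header */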
9270         p += dump_hdr.hdr_size + 1;
9271
9272         if (CHIP_IS_E1(bp)) {
9273                 for (i = 0; i < REGS_COUNT; i++)
9274                         if (IS_E1_ONLINE(reg_addrs[i].info))
9275                                 for (j = 0; j < reg_addrs[i].size; j++)
9276                                         *p++ = REG_RD(bp,
9277                                                       reg_addrs[i].addr + j*4);
9278
9279         } else { /* E1H */
9280                 for (i = 0; i < REGS_COUNT; i++)
9281                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9282                                 for (j = 0; j < reg_addrs[i].size; j++)
9283                                         *p++ = REG_RD(bp,
9284                                                       reg_addrs[i].addr + j*4);
9285         }
9286 }
9287
9288 #define PHY_FW_VER_LEN                  10
9289
9290 static void bnx2x_get_drvinfo(struct net_device *dev,
9291                               struct ethtool_drvinfo *info)
9292 {
9293         struct bnx2x *bp = netdev_priv(dev);
9294         u8 phy_fw_ver[PHY_FW_VER_LEN];
9295
9296         strcpy(info->driver, DRV_MODULE_NAME);
9297         strcpy(info->version, DRV_MODULE_VERSION);
9298
9299         phy_fw_ver[0] = '\0';
9300         if (bp->port.pmf) {
9301                 bnx2x_acquire_phy_lock(bp);
9302                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9303                                              (bp->state != BNX2X_STATE_CLOSED),
9304                                              phy_fw_ver, PHY_FW_VER_LEN);
9305                 bnx2x_release_phy_lock(bp);
9306         }
9307
9308         snprintf(info->fw_version, sizeof(info->fw_version), "BC:%d.%d.%d%s%s",
9309                  (bp->common.bc_ver & 0xff0000) >> 16,
9310                  (bp->common.bc_ver & 0xff00) >> 8,
9311                  (bp->common.bc_ver & 0xff),
9312                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9313         strcpy(info->bus_info, pci_name(bp->pdev));
9314         info->n_stats = BNX2X_NUM_STATS;
9315         info->testinfo_len = BNX2X_NUM_TESTS;
9316         info->eedump_len = bp->common.flash_size;
9317         info->regdump_len = bnx2x_get_regs_len(dev);
9318 }
9319
9320 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9321 {
9322         struct bnx2x *bp = netdev_priv(dev);
9323
9324         if (bp->flags & NO_WOL_FLAG) {
9325                 wol->supported = 0;
9326                 wol->wolopts = 0;
9327         } else {
9328                 wol->supported = WAKE_MAGIC;
9329                 if (bp->wol)
9330                         wol->wolopts = WAKE_MAGIC;
9331                 else
9332                         wol->wolopts = 0;
9333         }
9334         memset(&wol->sopass, 0, sizeof(wol->sopass));
9335 }
9336
9337 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9338 {
9339         struct bnx2x *bp = netdev_priv(dev);
9340
9341         if (wol->wolopts & ~WAKE_MAGIC)
9342                 return -EINVAL;
9343
9344         if (wol->wolopts & WAKE_MAGIC) {
9345                 if (bp->flags & NO_WOL_FLAG)
9346                         return -EINVAL;
9347
9348                 bp->wol = 1;
9349         } else
9350                 bp->wol = 0;
9351
9352         return 0;
9353 }
9354
9355 static u32 bnx2x_get_msglevel(struct net_device *dev)
9356 {
9357         struct bnx2x *bp = netdev_priv(dev);
9358
9359         return bp->msg_enable;
9360 }
9361
9362 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9363 {
9364         struct bnx2x *bp = netdev_priv(dev);
9365
9366         if (capable(CAP_NET_ADMIN))
9367                 bp->msg_enable = level;
9368 }
9369
9370 static int bnx2x_nway_reset(struct net_device *dev)
9371 {
9372         struct bnx2x *bp = netdev_priv(dev);
9373
9374         if (!bp->port.pmf)
9375                 return 0;
9376
9377         if (netif_running(dev)) {
9378                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9379                 bnx2x_link_set(bp);
9380         }
9381
9382         return 0;
9383 }
9384
9385 static u32 bnx2x_get_link(struct net_device *dev)
9386 {
9387         struct bnx2x *bp = netdev_priv(dev);
9388
9389         if (bp->flags & MF_FUNC_DIS)
9390                 return 0;
9391
9392         return bp->link_vars.link_up;
9393 }
9394
9395 static int bnx2x_get_eeprom_len(struct net_device *dev)
9396 {
9397         struct bnx2x *bp = netdev_priv(dev);
9398
9399         return bp->common.flash_size;
9400 }
9401
9402 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9403 {
9404         int port = BP_PORT(bp);
9405         int count, i;
9406         u32 val = 0;
9407
9408         /* adjust timeout for emulation/FPGA */
9409         count = NVRAM_TIMEOUT_COUNT;
9410         if (CHIP_REV_IS_SLOW(bp))
9411                 count *= 100;
9412
9413         /* request access to nvram interface */
9414         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9415                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9416
9417         for (i = 0; i < count*10; i++) {
9418                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9419                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9420                         break;
9421
9422                 udelay(5);
9423         }
9424
9425         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9426                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9427                 return -EBUSY;
9428         }
9429
9430         return 0;
9431 }
9432
9433 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9434 {
9435         int port = BP_PORT(bp);
9436         int count, i;
9437         u32 val = 0;
9438
9439         /* adjust timeout for emulation/FPGA */
9440         count = NVRAM_TIMEOUT_COUNT;
9441         if (CHIP_REV_IS_SLOW(bp))
9442                 count *= 100;
9443
9444         /* relinquish nvram interface */
9445         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9446                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9447
9448         for (i = 0; i < count*10; i++) {
9449                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9450                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9451                         break;
9452
9453                 udelay(5);
9454         }
9455
9456         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9457                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9458                 return -EBUSY;
9459         }
9460
9461         return 0;
9462 }
9463
9464 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9465 {
9466         u32 val;
9467
9468         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9469
9470         /* enable both bits, even on read */
9471         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9472                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9473                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9474 }
9475
9476 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9477 {
9478         u32 val;
9479
9480         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9481
9482         /* disable both bits, even after read */
9483         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9484                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9485                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9486 }
9487
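/*
 * NVRAM access below always follows the same bracketed sequence; a
 * minimal sketch (illustrative only, error handling elided):
 *
 *      rc = bnx2x_acquire_nvram_lock(bp);      (per-port SW arbitration)
 *      bnx2x_enable_nvram_access(bp);          (open the MCP window)
 *      ... bnx2x_nvram_read_dword() / bnx2x_nvram_write_dword() ...
 *      bnx2x_disable_nvram_access(bp);
 *      bnx2x_release_nvram_lock(bp);
 *
 * bnx2x_nvram_read() and bnx2x_nvram_write() further down are exactly
 * this pattern wrapped around a dword loop.
 */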
9488 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9489                                   u32 cmd_flags)
9490 {
9491         int count, i, rc;
9492         u32 val;
9493
9494         /* build the command word */
9495         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9496
9497         /* need to clear DONE bit separately */
9498         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9499
9500         /* address of the NVRAM to read from */
9501         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9502                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9503
9504         /* issue a read command */
9505         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9506
9507         /* adjust timeout for emulation/FPGA */
9508         count = NVRAM_TIMEOUT_COUNT;
9509         if (CHIP_REV_IS_SLOW(bp))
9510                 count *= 100;
9511
9512         /* wait for completion */
9513         *ret_val = 0;
9514         rc = -EBUSY;
9515         for (i = 0; i < count; i++) {
9516                 udelay(5);
9517                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9518
9519                 if (val & MCPR_NVM_COMMAND_DONE) {
9520                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9521                         /* we read nvram data in cpu order, but
9522                          * ethtool sees it as an array of bytes;
9523                          * big-endian conversion keeps that order */
9524                         *ret_val = cpu_to_be32(val);
9525                         rc = 0;
9526                         break;
9527                 }
9528         }
9529
9530         return rc;
9531 }
9532
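/*
 * All the dword accessors use the same DOIT/DONE handshake: clear the
 * stale DONE bit, program MCP_REG_MCPR_NVM_ADDR, set DOIT in
 * MCP_REG_MCPR_NVM_COMMAND, then poll for DONE - a budget of
 * NVRAM_TIMEOUT_COUNT udelay(5) polls, stretched 100x on the slow
 * emulation/FPGA platforms.
 */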
9533 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9534                             int buf_size)
9535 {
9536         int rc;
9537         u32 cmd_flags;
9538         __be32 val;
9539
9540         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9541                 DP(BNX2X_MSG_NVM,
9542                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9543                    offset, buf_size);
9544                 return -EINVAL;
9545         }
9546
9547         if (offset + buf_size > bp->common.flash_size) {
9548                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9549                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9550                    offset, buf_size, bp->common.flash_size);
9551                 return -EINVAL;
9552         }
9553
9554         /* request access to nvram interface */
9555         rc = bnx2x_acquire_nvram_lock(bp);
9556         if (rc)
9557                 return rc;
9558
9559         /* enable access to nvram interface */
9560         bnx2x_enable_nvram_access(bp);
9561
9562         /* read the first word(s) */
9563         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9564         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9565                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9566                 memcpy(ret_buf, &val, 4);
9567
9568                 /* advance to the next dword */
9569                 offset += sizeof(u32);
9570                 ret_buf += sizeof(u32);
9571                 buf_size -= sizeof(u32);
9572                 cmd_flags = 0;
9573         }
9574
9575         if (rc == 0) {
9576                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9577                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9578                 memcpy(ret_buf, &val, 4);
9579         }
9580
9581         /* disable access to nvram interface */
9582         bnx2x_disable_nvram_access(bp);
9583         bnx2x_release_nvram_lock(bp);
9584
9585         return rc;
9586 }
9587
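/*
 * Usage sketch, mirroring bnx2x_test_nvram() further down (illustrative
 * only):
 *
 *      __be32 magic;
 *
 *      rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
 *      if (rc == 0 && be32_to_cpu(magic) == 0x669955aa)
 *              ...bootstrap area of the image is valid...
 */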
9588 static int bnx2x_get_eeprom(struct net_device *dev,
9589                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9590 {
9591         struct bnx2x *bp = netdev_priv(dev);
9592         int rc;
9593
9594         if (!netif_running(dev))
9595                 return -EAGAIN;
9596
9597         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9598            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9599            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9600            eeprom->len, eeprom->len);
9601
9602         /* parameters already validated in ethtool_get_eeprom */
9603
9604         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9605
9606         return rc;
9607 }
9608
9609 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9610                                    u32 cmd_flags)
9611 {
9612         int count, i, rc;
9613
9614         /* build the command word */
9615         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9616
9617         /* need to clear DONE bit separately */
9618         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9619
9620         /* write the data */
9621         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9622
9623         /* address of the NVRAM to write to */
9624         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9625                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9626
9627         /* issue the write command */
9628         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9629
9630         /* adjust timeout for emulation/FPGA */
9631         count = NVRAM_TIMEOUT_COUNT;
9632         if (CHIP_REV_IS_SLOW(bp))
9633                 count *= 100;
9634
9635         /* wait for completion */
9636         rc = -EBUSY;
9637         for (i = 0; i < count; i++) {
9638                 udelay(5);
9639                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9640                 if (val & MCPR_NVM_COMMAND_DONE) {
9641                         rc = 0;
9642                         break;
9643                 }
9644         }
9645
9646         return rc;
9647 }
9648
9649 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
9650
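/* BYTE_OFFSET maps a flash byte address to its bit lane within the
 * aligned dword: e.g. offset 0x105 -> lane 1 -> shift 8, so
 * (0xff << BYTE_OFFSET(0x105)) == 0x0000ff00 selects that byte. */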
9651 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9652                               int buf_size)
9653 {
9654         int rc;
9655         u32 cmd_flags;
9656         u32 align_offset;
9657         __be32 val;
9658
9659         if (offset + buf_size > bp->common.flash_size) {
9660                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9661                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9662                    offset, buf_size, bp->common.flash_size);
9663                 return -EINVAL;
9664         }
9665
9666         /* request access to nvram interface */
9667         rc = bnx2x_acquire_nvram_lock(bp);
9668         if (rc)
9669                 return rc;
9670
9671         /* enable access to nvram interface */
9672         bnx2x_enable_nvram_access(bp);
9673
9674         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9675         align_offset = (offset & ~0x03);
9676         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9677
9678         if (rc == 0) {
9679                 val &= ~(0xff << BYTE_OFFSET(offset));
9680                 val |= (*data_buf << BYTE_OFFSET(offset));
9681
9682                 /* nvram data is returned as an array of bytes
9683                  * convert it back to cpu order */
9684                 val = be32_to_cpu(val);
9685
9686                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9687                                              cmd_flags);
9688         }
9689
9690         /* disable access to nvram interface */
9691         bnx2x_disable_nvram_access(bp);
9692         bnx2x_release_nvram_lock(bp);
9693
9694         return rc;
9695 }
9696
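/*
 * Single-byte writes are thus a read-modify-write of the aligned dword:
 * read it back in on-flash byte order, splice the new byte in at
 * BYTE_OFFSET(offset), convert to cpu order and write the dword back as
 * a one-shot FIRST|LAST transaction.
 */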
9697 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9698                              int buf_size)
9699 {
9700         int rc;
9701         u32 cmd_flags;
9702         u32 val;
9703         u32 written_so_far;
9704
9705         if (buf_size == 1)      /* single-byte write from ethtool */
9706                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9707
9708         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9709                 DP(BNX2X_MSG_NVM,
9710                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9711                    offset, buf_size);
9712                 return -EINVAL;
9713         }
9714
9715         if (offset + buf_size > bp->common.flash_size) {
9716                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9717                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9718                    offset, buf_size, bp->common.flash_size);
9719                 return -EINVAL;
9720         }
9721
9722         /* request access to nvram interface */
9723         rc = bnx2x_acquire_nvram_lock(bp);
9724         if (rc)
9725                 return rc;
9726
9727         /* enable access to nvram interface */
9728         bnx2x_enable_nvram_access(bp);
9729
9730         written_so_far = 0;
9731         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9732         while ((written_so_far < buf_size) && (rc == 0)) {
9733                 if (written_so_far == (buf_size - sizeof(u32)))
9734                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9735                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9736                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9737                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9738                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9739
9740                 memcpy(&val, data_buf, 4);
9741
9742                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9743
9744                 /* advance to the next dword */
9745                 offset += sizeof(u32);
9746                 data_buf += sizeof(u32);
9747                 written_so_far += sizeof(u32);
9748                 cmd_flags = 0;
9749         }
9750
9751         /* disable access to nvram interface */
9752         bnx2x_disable_nvram_access(bp);
9753         bnx2x_release_nvram_lock(bp);
9754
9755         return rc;
9756 }
9757
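/*
 * The FIRST/LAST flags bracket flash pages as well as the buffer
 * itself: LAST goes on the final dword of the buffer or of a page,
 * FIRST on the first dword of a new page.  Assuming the usual 256-byte
 * NVRAM_PAGE_SIZE, a write crossing offset 0x100 closes one transaction
 * at 0xfc and opens the next at 0x100.
 */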
9758 static int bnx2x_set_eeprom(struct net_device *dev,
9759                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9760 {
9761         struct bnx2x *bp = netdev_priv(dev);
9762         int port = BP_PORT(bp);
9763         int rc = 0;
9764
9765         if (!netif_running(dev))
9766                 return -EAGAIN;
9767
9768         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9769            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9770            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9771            eeprom->len, eeprom->len);
9772
9773         /* parameters already validated in ethtool_set_eeprom */
9774
9775         /* PHY eeprom can be accessed only by the PMF */
9776         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9777             !bp->port.pmf)
9778                 return -EINVAL;
9779
9780         if (eeprom->magic == 0x50485950) {
9781                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9782                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9783
9784                 bnx2x_acquire_phy_lock(bp);
9785                 rc |= bnx2x_link_reset(&bp->link_params,
9786                                        &bp->link_vars, 0);
9787                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9788                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9789                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9790                                        MISC_REGISTERS_GPIO_HIGH, port);
9791                 bnx2x_release_phy_lock(bp);
9792                 bnx2x_link_report(bp);
9793
9794         } else if (eeprom->magic == 0x50485952) {
9795                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9796                 if (bp->state == BNX2X_STATE_OPEN) {
9797                         bnx2x_acquire_phy_lock(bp);
9798                         rc |= bnx2x_link_reset(&bp->link_params,
9799                                                &bp->link_vars, 1);
9800
9801                         rc |= bnx2x_phy_init(&bp->link_params,
9802                                              &bp->link_vars);
9803                         bnx2x_release_phy_lock(bp);
9804                         bnx2x_calc_fc_adv(bp);
9805                 }
9806         } else if (eeprom->magic == 0x53985943) {
9807                 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9808                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9809                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9810                         u8 ext_phy_addr =
9811                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9812
9813                         /* take the DSP out of download mode */
9814                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9815                                        MISC_REGISTERS_GPIO_LOW, port);
9816
9817                         bnx2x_acquire_phy_lock(bp);
9818
9819                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9820
9821                         /* wait 0.5 sec to allow it to run */
9822                         msleep(500);
9823                         bnx2x_ext_phy_hw_reset(bp, port);
9824                         msleep(500);
9825                         bnx2x_release_phy_lock(bp);
9826                 }
9827         } else
9828                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9829
9830         return rc;
9831 }
9832
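/*
 * The magic values above imply a three-step PHY firmware upgrade flow:
 * 'PHYP' takes the link down (and raises GPIO_0 for the SFX7101), the
 * image is then streamed in via plain NVRAM writes, 0x53985943 signals
 * completion and resets the PHY, and 'PHYR' re-initializes the link.
 * Note that 0x53985943 is commented as 'PHYC' although the ASCII for
 * "PHYC" would be 0x50485943; the literal value also falls outside the
 * 0x504859xx PMF-only range checked above.
 */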
9833 static int bnx2x_get_coalesce(struct net_device *dev,
9834                               struct ethtool_coalesce *coal)
9835 {
9836         struct bnx2x *bp = netdev_priv(dev);
9837
9838         memset(coal, 0, sizeof(struct ethtool_coalesce));
9839
9840         coal->rx_coalesce_usecs = bp->rx_ticks;
9841         coal->tx_coalesce_usecs = bp->tx_ticks;
9842
9843         return 0;
9844 }
9845
9846 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximum coalescing timeout in us */
9847 static int bnx2x_set_coalesce(struct net_device *dev,
9848                               struct ethtool_coalesce *coal)
9849 {
9850         struct bnx2x *bp = netdev_priv(dev);
9851
9852         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9853         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9854                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9855
9856         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9857         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9858                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9859
9860         if (netif_running(dev))
9861                 bnx2x_update_coalesce(bp);
9862
9863         return 0;
9864 }
9865
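/*
 * Both tick values are clamped to BNX2X_MAX_COALES_TOUT, i.e.
 * 0xf0 * 12 = 2880 us.  From userspace this is driven by the standard
 * ethtool coalescing knobs, e.g.:
 *
 *      ethtool -C eth0 rx-usecs 50 tx-usecs 50
 */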
9866 static void bnx2x_get_ringparam(struct net_device *dev,
9867                                 struct ethtool_ringparam *ering)
9868 {
9869         struct bnx2x *bp = netdev_priv(dev);
9870
9871         ering->rx_max_pending = MAX_RX_AVAIL;
9872         ering->rx_mini_max_pending = 0;
9873         ering->rx_jumbo_max_pending = 0;
9874
9875         ering->rx_pending = bp->rx_ring_size;
9876         ering->rx_mini_pending = 0;
9877         ering->rx_jumbo_pending = 0;
9878
9879         ering->tx_max_pending = MAX_TX_AVAIL;
9880         ering->tx_pending = bp->tx_ring_size;
9881 }
9882
9883 static int bnx2x_set_ringparam(struct net_device *dev,
9884                                struct ethtool_ringparam *ering)
9885 {
9886         struct bnx2x *bp = netdev_priv(dev);
9887         int rc = 0;
9888
9889         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9890             (ering->tx_pending > MAX_TX_AVAIL) ||
9891             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9892                 return -EINVAL;
9893
9894         bp->rx_ring_size = ering->rx_pending;
9895         bp->tx_ring_size = ering->tx_pending;
9896
9897         if (netif_running(dev)) {
9898                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9899                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9900         }
9901
9902         return rc;
9903 }
9904
9905 static void bnx2x_get_pauseparam(struct net_device *dev,
9906                                  struct ethtool_pauseparam *epause)
9907 {
9908         struct bnx2x *bp = netdev_priv(dev);
9909
9910         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9911                            BNX2X_FLOW_CTRL_AUTO) &&
9912                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9913
9914         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9915                             BNX2X_FLOW_CTRL_RX);
9916         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9917                             BNX2X_FLOW_CTRL_TX);
9918
9919         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9920            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9921            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9922 }
9923
9924 static int bnx2x_set_pauseparam(struct net_device *dev,
9925                                 struct ethtool_pauseparam *epause)
9926 {
9927         struct bnx2x *bp = netdev_priv(dev);
9928
9929         if (IS_E1HMF(bp))
9930                 return 0;
9931
9932         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9933            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9934            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9935
9936         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9937
9938         if (epause->rx_pause)
9939                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9940
9941         if (epause->tx_pause)
9942                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9943
9944         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9945                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9946
9947         if (epause->autoneg) {
9948                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9949                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9950                         return -EINVAL;
9951                 }
9952
9953                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9954                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9955         }
9956
9957         DP(NETIF_MSG_LINK,
9958            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9959
9960         if (netif_running(dev)) {
9961                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9962                 bnx2x_link_set(bp);
9963         }
9964
9965         return 0;
9966 }
9967
9968 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9969 {
9970         struct bnx2x *bp = netdev_priv(dev);
9971         int changed = 0;
9972         int rc = 0;
9973
9974         /* TPA requires Rx CSUM offloading */
9975         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9976                 if (!disable_tpa) {
9977                         if (!(dev->features & NETIF_F_LRO)) {
9978                                 dev->features |= NETIF_F_LRO;
9979                                 bp->flags |= TPA_ENABLE_FLAG;
9980                                 changed = 1;
9981                         }
9982                 } else
9983                         rc = -EINVAL;
9984         } else if (dev->features & NETIF_F_LRO) {
9985                 dev->features &= ~NETIF_F_LRO;
9986                 bp->flags &= ~TPA_ENABLE_FLAG;
9987                 changed = 1;
9988         }
9989
9990         if (changed && netif_running(dev)) {
9991                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9992                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9993         }
9994
9995         return rc;
9996 }
9997
9998 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9999 {
10000         struct bnx2x *bp = netdev_priv(dev);
10001
10002         return bp->rx_csum;
10003 }
10004
10005 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10006 {
10007         struct bnx2x *bp = netdev_priv(dev);
10008         int rc = 0;
10009
10010         bp->rx_csum = data;
10011
10012         /* Disable TPA when Rx CSUM is disabled; otherwise all
10013            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10014         if (!data) {
10015                 u32 flags = ethtool_op_get_flags(dev);
10016
10017                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10018         }
10019
10020         return rc;
10021 }
10022
10023 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10024 {
10025         if (data) {
10026                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10027                 dev->features |= NETIF_F_TSO6;
10028         } else {
10029                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10030                 dev->features &= ~NETIF_F_TSO6;
10031         }
10032
10033         return 0;
10034 }
10035
10036 static const struct {
10037         char string[ETH_GSTRING_LEN];
10038 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10039         { "register_test (offline)" },
10040         { "memory_test (offline)" },
10041         { "loopback_test (offline)" },
10042         { "nvram_test (online)" },
10043         { "interrupt_test (online)" },
10044         { "link_test (online)" },
10045         { "idle check (online)" }
10046 };
10047
10048 static int bnx2x_test_registers(struct bnx2x *bp)
10049 {
10050         int idx, i, rc = -ENODEV;
10051         u32 wr_val = 0;
10052         int port = BP_PORT(bp);
10053         static const struct {
10054                 u32  offset0;
10055                 u32  offset1;
10056                 u32  mask;
10057         } reg_tbl[] = {
10058 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10059                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10060                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10061                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10062                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10063                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10064                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10065                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10066                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10067                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10068 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10069                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10070                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10071                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10072                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10073                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10074                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10075                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10076                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10077                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10078 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10079                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10080                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10081                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10082                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10083                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10084                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10085                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10086                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10087                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10088 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10089                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10090                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10091                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10092                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10093                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10094                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10095
10096                 { 0xffffffff, 0, 0x00000000 }
10097         };
10098
10099         if (!netif_running(bp->dev))
10100                 return rc;
10101
10102         /* Run the test twice:
10103            first writing 0x00000000, then writing 0xffffffff */
10104         for (idx = 0; idx < 2; idx++) {
10105
10106                 switch (idx) {
10107                 case 0:
10108                         wr_val = 0;
10109                         break;
10110                 case 1:
10111                         wr_val = 0xffffffff;
10112                         break;
10113                 }
10114
10115                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10116                         u32 offset, mask, save_val, val;
10117
10118                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10119                         mask = reg_tbl[i].mask;
10120
10121                         save_val = REG_RD(bp, offset);
10122
10123                         REG_WR(bp, offset, wr_val);
10124                         val = REG_RD(bp, offset);
10125
10126                         /* Restore the original register's value */
10127                         REG_WR(bp, offset, save_val);
10128
10129                         /* verify that the value is as expected */
10130                         if ((val & mask) != (wr_val & mask))
10131                                 goto test_reg_exit;
10132                 }
10133         }
10134
10135         rc = 0;
10136
10137 test_reg_exit:
10138         return rc;
10139 }
10140
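/*
 * Each reg_tbl[] entry is a per-port pair: the register actually tested
 * is offset0 + port * offset1, e.g. NIG_REG_LLH0_DEST_MAC_0_0 with
 * offset1 == 160 lands 0xa0 bytes higher for port 1.  The test writes
 * 0x00000000 and then 0xffffffff and verifies that exactly the bits in
 * 'mask' are read back, restoring the original value in between.
 */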
10141 static int bnx2x_test_memory(struct bnx2x *bp)
10142 {
10143         int i, j, rc = -ENODEV;
10144         u32 val;
10145         static const struct {
10146                 u32 offset;
10147                 int size;
10148         } mem_tbl[] = {
10149                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10150                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10151                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10152                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10153                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10154                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10155                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10156
10157                 { 0xffffffff, 0 }
10158         };
10159         static const struct {
10160                 char *name;
10161                 u32 offset;
10162                 u32 e1_mask;
10163                 u32 e1h_mask;
10164         } prty_tbl[] = {
10165                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10166                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10167                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10168                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10169                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10170                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10171
10172                 { NULL, 0xffffffff, 0, 0 }
10173         };
10174
10175         if (!netif_running(bp->dev))
10176                 return rc;
10177
10178         /* Go through all the memories */
10179         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10180                 for (j = 0; j < mem_tbl[i].size; j++)
10181                         REG_RD(bp, mem_tbl[i].offset + j*4);
10182
10183         /* Check the parity status */
10184         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10185                 val = REG_RD(bp, prty_tbl[i].offset);
10186                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10187                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10188                         DP(NETIF_MSG_HW,
10189                            "%s is 0x%x\n", prty_tbl[i].name, val);
10190                         goto test_mem_exit;
10191                 }
10192         }
10193
10194         rc = 0;
10195
10196 test_mem_exit:
10197         return rc;
10198 }
10199
10200 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10201 {
10202         int cnt = 1000;
10203
10204         if (link_up)
10205                 while (bnx2x_link_test(bp) && cnt--)
10206                         msleep(10);
10207 }
10208
10209 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10210 {
10211         unsigned int pkt_size, num_pkts, i;
10212         struct sk_buff *skb;
10213         unsigned char *packet;
10214         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10215         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10216         u16 tx_start_idx, tx_idx;
10217         u16 rx_start_idx, rx_idx;
10218         u16 pkt_prod, bd_prod;
10219         struct sw_tx_bd *tx_buf;
10220         struct eth_tx_start_bd *tx_start_bd;
10221         struct eth_tx_parse_bd *pbd = NULL;
10222         dma_addr_t mapping;
10223         union eth_rx_cqe *cqe;
10224         u8 cqe_fp_flags;
10225         struct sw_rx_bd *rx_buf;
10226         u16 len;
10227         int rc = -ENODEV;
10228
10229         /* check the loopback mode */
10230         switch (loopback_mode) {
10231         case BNX2X_PHY_LOOPBACK:
10232                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10233                         return -EINVAL;
10234                 break;
10235         case BNX2X_MAC_LOOPBACK:
10236                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10237                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10238                 break;
10239         default:
10240                 return -EINVAL;
10241         }
10242
10243         /* prepare the loopback packet */
10244         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10245                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10246         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10247         if (!skb) {
10248                 rc = -ENOMEM;
10249                 goto test_loopback_exit;
10250         }
10251         packet = skb_put(skb, pkt_size);
10252         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10253         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10254         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10255         for (i = ETH_HLEN; i < pkt_size; i++)
10256                 packet[i] = (unsigned char) (i & 0xff);
10257
10258         /* send the loopback packet */
10259         num_pkts = 0;
10260         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10261         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10262
10263         pkt_prod = fp_tx->tx_pkt_prod++;
10264         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10265         tx_buf->first_bd = fp_tx->tx_bd_prod;
10266         tx_buf->skb = skb;
10267         tx_buf->flags = 0;
10268
10269         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10270         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10271         mapping = pci_map_single(bp->pdev, skb->data,
10272                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10273         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10274         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10275         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10276         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10277         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10278         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10279         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10280                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10281
10282         /* turn on parsing and get a BD */
10283         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10284         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10285
10286         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10287
10288         wmb();
10289
10290         fp_tx->tx_db.data.prod += 2;
10291         barrier();
10292         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10293
10294         mmiowb();
10295
10296         num_pkts++;
10297         fp_tx->tx_bd_prod += 2; /* start + pbd */
10298
10299         udelay(100);
10300
10301         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10302         if (tx_idx != tx_start_idx + num_pkts)
10303                 goto test_loopback_exit;
10304
10305         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10306         if (rx_idx != rx_start_idx + num_pkts)
10307                 goto test_loopback_exit;
10308
10309         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10310         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10311         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10312                 goto test_loopback_rx_exit;
10313
10314         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10315         if (len != pkt_size)
10316                 goto test_loopback_rx_exit;
10317
10318         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10319         skb = rx_buf->skb;
10320         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10321         for (i = ETH_HLEN; i < pkt_size; i++)
10322                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10323                         goto test_loopback_rx_exit;
10324
10325         rc = 0;
10326
10327 test_loopback_rx_exit:
10328
10329         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10330         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10331         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10332         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10333
10334         /* Update producers */
10335         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10336                              fp_rx->rx_sge_prod);
10337
10338 test_loopback_exit:
10339         bp->link_params.loopback_mode = LOOPBACK_NONE;
10340
10341         return rc;
10342 }
10343
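/*
 * The loopback frame is self-describing: DA is our own MAC, SA is
 * zeroed, the rest of the header is 0x77 filler and the payload is
 * incrementing bytes (i & 0xff), which is exactly what the receive leg
 * re-checks byte by byte.  (ETH_RX_ERROR_FALGS is spelled that way in
 * the driver headers.)
 */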
10344 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10345 {
10346         int rc = 0, res;
10347
10348         if (!netif_running(bp->dev))
10349                 return BNX2X_LOOPBACK_FAILED;
10350
10351         bnx2x_netif_stop(bp, 1);
10352         bnx2x_acquire_phy_lock(bp);
10353
10354         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10355         if (res) {
10356                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10357                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10358         }
10359
10360         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10361         if (res) {
10362                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10363                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10364         }
10365
10366         bnx2x_release_phy_lock(bp);
10367         bnx2x_netif_start(bp);
10368
10369         return rc;
10370 }
10371
10372 #define CRC32_RESIDUAL                  0xdebb20e3
10373
10374 static int bnx2x_test_nvram(struct bnx2x *bp)
10375 {
10376         static const struct {
10377                 int offset;
10378                 int size;
10379         } nvram_tbl[] = {
10380                 {     0,  0x14 }, /* bootstrap */
10381                 {  0x14,  0xec }, /* dir */
10382                 { 0x100, 0x350 }, /* manuf_info */
10383                 { 0x450,  0xf0 }, /* feature_info */
10384                 { 0x640,  0x64 }, /* upgrade_key_info */
10385                 { 0x6a4,  0x64 },
10386                 { 0x708,  0x70 }, /* manuf_key_info */
10387                 { 0x778,  0x70 },
10388                 {     0,     0 }
10389         };
10390         __be32 buf[0x350 / 4];
10391         u8 *data = (u8 *)buf;
10392         int i, rc;
10393         u32 magic, crc;
10394
10395         rc = bnx2x_nvram_read(bp, 0, data, 4);
10396         if (rc) {
10397                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10398                 goto test_nvram_exit;
10399         }
10400
10401         magic = be32_to_cpu(buf[0]);
10402         if (magic != 0x669955aa) {
10403                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10404                 rc = -ENODEV;
10405                 goto test_nvram_exit;
10406         }
10407
10408         for (i = 0; nvram_tbl[i].size; i++) {
10409
10410                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10411                                       nvram_tbl[i].size);
10412                 if (rc) {
10413                         DP(NETIF_MSG_PROBE,
10414                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10415                         goto test_nvram_exit;
10416                 }
10417
10418                 crc = ether_crc_le(nvram_tbl[i].size, data);
10419                 if (crc != CRC32_RESIDUAL) {
10420                         DP(NETIF_MSG_PROBE,
10421                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10422                         rc = -ENODEV;
10423                         goto test_nvram_exit;
10424                 }
10425         }
10426
10427 test_nvram_exit:
10428         return rc;
10429 }
10430
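/*
 * Each nvram_tbl[] region evidently carries its own CRC32 in-band, so
 * running ether_crc_le() across the whole region (data plus stored CRC)
 * must produce the fixed CRC32 residual 0xdebb20e3 for an intact image;
 * no per-region expected value is needed.
 */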
10431 static int bnx2x_test_intr(struct bnx2x *bp)
10432 {
10433         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10434         int i, rc;
10435
10436         if (!netif_running(bp->dev))
10437                 return -ENODEV;
10438
10439         config->hdr.length = 0;
10440         if (CHIP_IS_E1(bp))
10441                 /* use last unicast entries */
10442                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
10443         else
10444                 config->hdr.offset = BP_FUNC(bp);
10445         config->hdr.client_id = bp->fp->cl_id;
10446         config->hdr.reserved1 = 0;
10447
10448         bp->set_mac_pending++;
10449         smp_wmb();
10450         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10451                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10452                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10453         if (rc == 0) {
10454                 for (i = 0; i < 10; i++) {
10455                         if (!bp->set_mac_pending)
10456                                 break;
10457                         smp_rmb();
10458                         msleep_interruptible(10);
10459                 }
10460                 if (i == 10)
10461                         rc = -ENODEV;
10462         }
10463
10464         return rc;
10465 }
10466
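/*
 * There is no dedicated interrupt-test mechanism: the test posts a
 * harmless zero-entry SET_MAC ramrod and takes its completion
 * (set_mac_pending dropping within ~100 ms) as proof that slowpath
 * interrupts are being delivered.
 */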
10467 static void bnx2x_self_test(struct net_device *dev,
10468                             struct ethtool_test *etest, u64 *buf)
10469 {
10470         struct bnx2x *bp = netdev_priv(dev);
10471
10472         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10473
10474         if (!netif_running(dev))
10475                 return;
10476
10477         /* offline tests are not supported in MF mode */
10478         if (IS_E1HMF(bp))
10479                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10480
10481         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10482                 int port = BP_PORT(bp);
10483                 u32 val;
10484                 u8 link_up;
10485
10486                 /* save current value of input enable for TX port IF */
10487                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10488                 /* disable input for TX port IF */
10489                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10490
10491                 link_up = (bnx2x_link_test(bp) == 0);
10492                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10493                 bnx2x_nic_load(bp, LOAD_DIAG);
10494                 /* wait until link state is restored */
10495                 bnx2x_wait_for_link(bp, link_up);
10496
10497                 if (bnx2x_test_registers(bp) != 0) {
10498                         buf[0] = 1;
10499                         etest->flags |= ETH_TEST_FL_FAILED;
10500                 }
10501                 if (bnx2x_test_memory(bp) != 0) {
10502                         buf[1] = 1;
10503                         etest->flags |= ETH_TEST_FL_FAILED;
10504                 }
10505                 buf[2] = bnx2x_test_loopback(bp, link_up);
10506                 if (buf[2] != 0)
10507                         etest->flags |= ETH_TEST_FL_FAILED;
10508
10509                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10510
10511                 /* restore input for TX port IF */
10512                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10513
10514                 bnx2x_nic_load(bp, LOAD_NORMAL);
10515                 /* wait until link state is restored */
10516                 bnx2x_wait_for_link(bp, link_up);
10517         }
10518         if (bnx2x_test_nvram(bp) != 0) {
10519                 buf[3] = 1;
10520                 etest->flags |= ETH_TEST_FL_FAILED;
10521         }
10522         if (bnx2x_test_intr(bp) != 0) {
10523                 buf[4] = 1;
10524                 etest->flags |= ETH_TEST_FL_FAILED;
10525         }
10526         if (bp->port.pmf)
10527                 if (bnx2x_link_test(bp) != 0) {
10528                         buf[5] = 1;
10529                         etest->flags |= ETH_TEST_FL_FAILED;
10530                 }
10531
10532 #ifdef BNX2X_EXTRA_DEBUG
10533         bnx2x_panic_dump(bp);
10534 #endif
10535 }
10536
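/*
 * Result slots filled above: buf[0] registers, buf[1] memory, buf[2]
 * loopback, buf[3] nvram, buf[4] interrupt, buf[5] link (PMF only);
 * buf[6] (idle check) is left at zero here.  The offline subset is
 * requested from userspace with e.g.:
 *
 *      ethtool -t eth0 offline
 */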
10537 static const struct {
10538         long offset;
10539         int size;
10540         u8 string[ETH_GSTRING_LEN];
10541 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10542 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10543         { Q_STATS_OFFSET32(error_bytes_received_hi),
10544                                                 8, "[%d]: rx_error_bytes" },
10545         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10546                                                 8, "[%d]: rx_ucast_packets" },
10547         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10548                                                 8, "[%d]: rx_mcast_packets" },
10549         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10550                                                 8, "[%d]: rx_bcast_packets" },
10551         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10552         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10553                                          4, "[%d]: rx_phy_ip_err_discards"},
10554         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10555                                          4, "[%d]: rx_skb_alloc_discard" },
10556         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10557
10558 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10559         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10560                                                         8, "[%d]: tx_packets" }
10561 };
10562
10563 static const struct {
10564         long offset;
10565         int size;
10566         u32 flags;
10567 #define STATS_FLAGS_PORT                1
10568 #define STATS_FLAGS_FUNC                2
10569 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10570         u8 string[ETH_GSTRING_LEN];
10571 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10572 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10573                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10574         { STATS_OFFSET32(error_bytes_received_hi),
10575                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10576         { STATS_OFFSET32(total_unicast_packets_received_hi),
10577                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10578         { STATS_OFFSET32(total_multicast_packets_received_hi),
10579                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10580         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10581                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10582         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10583                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10584         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10585                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10586         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10587                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10588         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10589                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10590 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10591                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10592         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10593                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10594         { STATS_OFFSET32(no_buff_discard_hi),
10595                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10596         { STATS_OFFSET32(mac_filter_discard),
10597                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10598         { STATS_OFFSET32(xxoverflow_discard),
10599                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10600         { STATS_OFFSET32(brb_drop_hi),
10601                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10602         { STATS_OFFSET32(brb_truncate_hi),
10603                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10604         { STATS_OFFSET32(pause_frames_received_hi),
10605                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10606         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10607                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10608         { STATS_OFFSET32(nig_timer_max),
10609                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10610 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10611                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10612         { STATS_OFFSET32(rx_skb_alloc_failed),
10613                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10614         { STATS_OFFSET32(hw_csum_err),
10615                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10616
10617         { STATS_OFFSET32(total_bytes_transmitted_hi),
10618                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10619         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10620                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10621         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10622                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10623         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10624                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10625         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10626                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10627         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10628                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10629         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10630                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10631 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10632                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10633         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10634                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10635         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10636                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10637         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10638                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10639         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10640                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10641         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10642                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10643         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10644                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10645         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10646                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10647         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10648                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10649         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10650                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10651 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10652                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10653         { STATS_OFFSET32(pause_frames_sent_hi),
10654                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10655 };
10656
10657 #define IS_PORT_STAT(i) \
10658         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10659 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10660 #define IS_E1HMF_MODE_STAT(bp) \
10661                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
10662
10663 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10664 {
10665         struct bnx2x *bp = netdev_priv(dev);
10666         int i, num_stats;
10667
10668         switch (stringset) {
10669         case ETH_SS_STATS:
10670                 if (is_multi(bp)) {
10671                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10672                         if (!IS_E1HMF_MODE_STAT(bp))
10673                                 num_stats += BNX2X_NUM_STATS;
10674                 } else {
10675                         if (IS_E1HMF_MODE_STAT(bp)) {
10676                                 num_stats = 0;
10677                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10678                                         if (IS_FUNC_STAT(i))
10679                                                 num_stats++;
10680                         } else
10681                                 num_stats = BNX2X_NUM_STATS;
10682                 }
10683                 return num_stats;
10684
10685         case ETH_SS_TEST:
10686                 return BNX2X_NUM_TESTS;
10687
10688         default:
10689                 return -EINVAL;
10690         }
10691 }
10692
10693 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10694 {
10695         struct bnx2x *bp = netdev_priv(dev);
10696         int i, j, k;
10697
10698         switch (stringset) {
10699         case ETH_SS_STATS:
10700                 if (is_multi(bp)) {
10701                         k = 0;
10702                         for_each_queue(bp, i) {
10703                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10704                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10705                                                 bnx2x_q_stats_arr[j].string, i);
10706                                 k += BNX2X_NUM_Q_STATS;
10707                         }
10708                         if (IS_E1HMF_MODE_STAT(bp))
10709                                 break;
10710                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10711                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10712                                        bnx2x_stats_arr[j].string);
10713                 } else {
10714                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10715                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10716                                         continue;
10717                                 strcpy(buf + j*ETH_GSTRING_LEN,
10718                                        bnx2x_stats_arr[i].string);
10719                                 j++;
10720                         }
10721                 }
10722                 break;
10723
10724         case ETH_SS_TEST:
10725                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10726                 break;
10727         }
10728 }
10729
10730 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10731                                     struct ethtool_stats *stats, u64 *buf)
10732 {
10733         struct bnx2x *bp = netdev_priv(dev);
10734         u32 *hw_stats, *offset;
10735         int i, j, k;
10736
10737         if (is_multi(bp)) {
10738                 k = 0;
10739                 for_each_queue(bp, i) {
10740                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10741                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10742                                 if (bnx2x_q_stats_arr[j].size == 0) {
10743                                         /* skip this counter */
10744                                         buf[k + j] = 0;
10745                                         continue;
10746                                 }
10747                                 offset = (hw_stats +
10748                                           bnx2x_q_stats_arr[j].offset);
10749                                 if (bnx2x_q_stats_arr[j].size == 4) {
10750                                         /* 4-byte counter */
10751                                         buf[k + j] = (u64) *offset;
10752                                         continue;
10753                                 }
10754                                 /* 8-byte counter */
10755                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10756                         }
10757                         k += BNX2X_NUM_Q_STATS;
10758                 }
10759                 if (IS_E1HMF_MODE_STAT(bp))
10760                         return;
10761                 hw_stats = (u32 *)&bp->eth_stats;
10762                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10763                         if (bnx2x_stats_arr[j].size == 0) {
10764                                 /* skip this counter */
10765                                 buf[k + j] = 0;
10766                                 continue;
10767                         }
10768                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10769                         if (bnx2x_stats_arr[j].size == 4) {
10770                                 /* 4-byte counter */
10771                                 buf[k + j] = (u64) *offset;
10772                                 continue;
10773                         }
10774                         /* 8-byte counter */
10775                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10776                 }
10777         } else {
10778                 hw_stats = (u32 *)&bp->eth_stats;
10779                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10780                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10781                                 continue;
10782                         if (bnx2x_stats_arr[i].size == 0) {
10783                                 /* skip this counter */
10784                                 buf[j] = 0;
10785                                 j++;
10786                                 continue;
10787                         }
10788                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10789                         if (bnx2x_stats_arr[i].size == 4) {
10790                                 /* 4-byte counter */
10791                                 buf[j] = (u64) *offset;
10792                                 j++;
10793                                 continue;
10794                         }
10795                         /* 8-byte counter */
10796                         buf[j] = HILO_U64(*offset, *(offset + 1));
10797                         j++;
10798                 }
10799         }
10800 }
10801
10802 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10803 {
10804         struct bnx2x *bp = netdev_priv(dev);
10805         int i;
10806
10807         if (!netif_running(dev))
10808                 return 0;
10809
10810         if (!bp->port.pmf)
10811                 return 0;
10812
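        /* A note on semantics (assumed from the ethtool phys_id contract):
         * data is the requested blink duration in seconds, 0 meaning "use
         * the default", taken here to be 2; each on/off phase below lasts
         * ~500ms, so data*2 iterations run for ~data seconds.
         */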
10813         if (data == 0)
10814                 data = 2;
10815
10816         for (i = 0; i < (data * 2); i++) {
10817                 if ((i % 2) == 0)
10818                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10819                                       SPEED_1000);
10820                 else
10821                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10822
10823                 msleep_interruptible(500);
10824                 if (signal_pending(current))
10825                         break;
10826         }
10827
10828         if (bp->link_vars.link_up)
10829                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10830                               bp->link_vars.line_speed);
10831
10832         return 0;
10833 }
10834
10835 static const struct ethtool_ops bnx2x_ethtool_ops = {
10836         .get_settings           = bnx2x_get_settings,
10837         .set_settings           = bnx2x_set_settings,
10838         .get_drvinfo            = bnx2x_get_drvinfo,
10839         .get_regs_len           = bnx2x_get_regs_len,
10840         .get_regs               = bnx2x_get_regs,
10841         .get_wol                = bnx2x_get_wol,
10842         .set_wol                = bnx2x_set_wol,
10843         .get_msglevel           = bnx2x_get_msglevel,
10844         .set_msglevel           = bnx2x_set_msglevel,
10845         .nway_reset             = bnx2x_nway_reset,
10846         .get_link               = bnx2x_get_link,
10847         .get_eeprom_len         = bnx2x_get_eeprom_len,
10848         .get_eeprom             = bnx2x_get_eeprom,
10849         .set_eeprom             = bnx2x_set_eeprom,
10850         .get_coalesce           = bnx2x_get_coalesce,
10851         .set_coalesce           = bnx2x_set_coalesce,
10852         .get_ringparam          = bnx2x_get_ringparam,
10853         .set_ringparam          = bnx2x_set_ringparam,
10854         .get_pauseparam         = bnx2x_get_pauseparam,
10855         .set_pauseparam         = bnx2x_set_pauseparam,
10856         .get_rx_csum            = bnx2x_get_rx_csum,
10857         .set_rx_csum            = bnx2x_set_rx_csum,
10858         .get_tx_csum            = ethtool_op_get_tx_csum,
10859         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10860         .set_flags              = bnx2x_set_flags,
10861         .get_flags              = ethtool_op_get_flags,
10862         .get_sg                 = ethtool_op_get_sg,
10863         .set_sg                 = ethtool_op_set_sg,
10864         .get_tso                = ethtool_op_get_tso,
10865         .set_tso                = bnx2x_set_tso,
10866         .self_test              = bnx2x_self_test,
10867         .get_sset_count         = bnx2x_get_sset_count,
10868         .get_strings            = bnx2x_get_strings,
10869         .phys_id                = bnx2x_phys_id,
10870         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10871 };
10872
10873 /* end of ethtool_ops */
10874
10875 /****************************************************************************
10876 * General service functions
10877 ****************************************************************************/
10878
10879 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10880 {
10881         u16 pmcsr;
10882
10883         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10884
10885         switch (state) {
10886         case PCI_D0:
10887                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10888                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10889                                        PCI_PM_CTRL_PME_STATUS));
10890
10891                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10892                         /* delay required during transition out of D3hot */
10893                         msleep(20);
10894                 break;
10895
10896         case PCI_D3hot:
10897                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
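                /* 3 is the D3hot encoding of the PM_CTRL power-state field */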
10898                 pmcsr |= 3;
10899
10900                 if (bp->wol)
10901                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10902
10903                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10904                                       pmcsr);
10905
10906                 /* No more memory access after this point until
10907                  * the device is brought back to D0.
10908                  */
10909                 break;
10910
10911         default:
10912                 return -EINVAL;
10913         }
10914         return 0;
10915 }
10916
10917 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10918 {
10919         u16 rx_cons_sb;
10920
10921         /* Tell compiler that status block fields can change */
10922         barrier();
10923         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
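        /* Assumed ring layout: the last entry of each RCQ page is a
         * "next page" pointer rather than a real completion, so step
         * over it when the consumer index lands on it
         */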
10924         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10925                 rx_cons_sb++;
10926         return (fp->rx_comp_cons != rx_cons_sb);
10927 }
10928
10929 /*
10930  * net_device service functions
10931  */
10932
10933 static int bnx2x_poll(struct napi_struct *napi, int budget)
10934 {
10935         int work_done = 0;
10936         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10937                                                  napi);
10938         struct bnx2x *bp = fp->bp;
10939
10940         while (1) {
10941 #ifdef BNX2X_STOP_ON_ERROR
10942                 if (unlikely(bp->panic)) {
10943                         napi_complete(napi);
10944                         return 0;
10945                 }
10946 #endif
10947
10948                 if (bnx2x_has_tx_work(fp))
10949                         bnx2x_tx_int(fp);
10950
10951                 if (bnx2x_has_rx_work(fp)) {
10952                         work_done += bnx2x_rx_int(fp, budget - work_done);
10953
10954                         /* must not complete if we consumed full budget */
10955                         if (work_done >= budget)
10956                                 break;
10957                 }
10958
10959                 /* Fall out from the NAPI loop if needed */
10960                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10961                         bnx2x_update_fpsb_idx(fp);
10962                         /* bnx2x_has_rx_work() reads the status block, thus we need
10963                          * to ensure that status block indices have been actually read
10964                          * (bnx2x_update_fpsb_idx) prior to this check
10965                          * (bnx2x_has_rx_work) so that we won't write the "newer"
10966                          * value of the status block to IGU (if there was a DMA right
10967                          * after bnx2x_has_rx_work and if there is no rmb, the memory
10968                          * reading (bnx2x_update_fpsb_idx) may be postponed to right
10969                          * before bnx2x_ack_sb). In this case there will never be
10970                          * another interrupt until there is another update of the
10971                          * status block, while there is still unhandled work.
10972                          */
10973                         rmb();
10974
10975                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10976                                 napi_complete(napi);
10977                                 /* Re-enable interrupts */
10978                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10979                                              le16_to_cpu(fp->fp_c_idx),
10980                                              IGU_INT_NOP, 1);
10981                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10982                                              le16_to_cpu(fp->fp_u_idx),
10983                                              IGU_INT_ENABLE, 1);
10984                                 break;
10985                         }
10986                 }
10987         }
10988
10989         return work_done;
10990 }
10991
10992
10993 /* We split the first BD into a headers BD and a data BD to ease the
10994  * pain of our fellow microcode engineers; we use one DMA mapping for
10995  * both BDs.
10996  * So far this has only been observed to happen
10997  * in Other Operating Systems(TM).
10998  */
10999 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11000                                    struct bnx2x_fastpath *fp,
11001                                    struct sw_tx_bd *tx_buf,
11002                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
11003                                    u16 bd_prod, int nbd)
11004 {
11005         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11006         struct eth_tx_bd *d_tx_bd;
11007         dma_addr_t mapping;
11008         int old_len = le16_to_cpu(h_tx_bd->nbytes);
11009
11010         /* first fix first BD */
11011         h_tx_bd->nbd = cpu_to_le16(nbd);
11012         h_tx_bd->nbytes = cpu_to_le16(hlen);
11013
11014         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11015            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11016            h_tx_bd->addr_lo, h_tx_bd->nbd);
11017
11018         /* now get a new data BD
11019          * (after the pbd) and fill it */
11020         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11021         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11022
11023         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11024                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11025
11026         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11027         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11028         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11029
11030         /* this marks the BD as one that has no individual mapping */
11031         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11032
11033         DP(NETIF_MSG_TX_QUEUED,
11034            "TSO split data size is %d (%x:%x)\n",
11035            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11036
11037         /* update tx_bd */
11038         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11039
11040         return bd_prod;
11041 }
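
/* A sketch with made-up sizes: for a 200-byte linear part with hlen = 66,
 * bnx2x_tx_split() trims the start BD down to the 66 header bytes and
 * appends a 134-byte data BD that reuses the same DMA mapping at offset
 * hlen; the caller accounts for the extra BD in nbd.
 */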
11042
11043 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11044 {
11045         if (fix > 0)
11046                 csum = (u16) ~csum_fold(csum_sub(csum,
11047                                 csum_partial(t_header - fix, fix, 0)));
11048
11049         else if (fix < 0)
11050                 csum = (u16) ~csum_fold(csum_add(csum,
11051                                 csum_partial(t_header, -fix, 0)));
11052
11053         return swab16(csum);
11054 }
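
/* Rough intent (reading of the SKB_CS_OFF convention): if the stack's
 * partial checksum was started fix bytes before the transport header
 * (fix > 0), the sum of those extra bytes is subtracted back out before
 * folding; a negative fix adds the missing bytes instead. The result is
 * byte-swapped for the parsing BD.
 */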
11055
11056 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11057 {
11058         u32 rc;
11059
11060         if (skb->ip_summed != CHECKSUM_PARTIAL)
11061                 rc = XMIT_PLAIN;
11062
11063         else {
11064                 if (skb->protocol == htons(ETH_P_IPV6)) {
11065                         rc = XMIT_CSUM_V6;
11066                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11067                                 rc |= XMIT_CSUM_TCP;
11068
11069                 } else {
11070                         rc = XMIT_CSUM_V4;
11071                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11072                                 rc |= XMIT_CSUM_TCP;
11073                 }
11074         }
11075
11076         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11077                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11078
11079         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11080                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11081
11082         return rc;
11083 }
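
/* For instance, a CHECKSUM_PARTIAL IPv4 TCP skb with SKB_GSO_TCPV4 set
 * yields XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP.
 */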
11084
11085 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11086 /* check whether the packet requires linearization (i.e. it is too
11087    fragmented); no need to check fragmentation if the page size is > 8K
11088    (the FW restrictions cannot be violated) */
11089 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11090                              u32 xmit_type)
11091 {
11092         int to_copy = 0;
11093         int hlen = 0;
11094         int first_bd_sz = 0;
11095
11096         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11097         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11098
11099                 if (xmit_type & XMIT_GSO) {
11100                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11101                         /* Check if LSO packet needs to be copied:
11102                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11103                         int wnd_size = MAX_FETCH_BD - 3;
11104                         /* Number of windows to check */
11105                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11106                         int wnd_idx = 0;
11107                         int frag_idx = 0;
11108                         u32 wnd_sum = 0;
11109
11110                         /* Headers length */
11111                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11112                                 tcp_hdrlen(skb);
11113
11114                         /* Amount of data (w/o headers) in the linear part of the SKB */
11115                         first_bd_sz = skb_headlen(skb) - hlen;
11116
11117                         wnd_sum  = first_bd_sz;
11118
11119                         /* Calculate the first sum - it's special */
11120                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11121                                 wnd_sum +=
11122                                         skb_shinfo(skb)->frags[frag_idx].size;
11123
11124                         /* If there was data in the linear part of the skb - check it */
11125                         if (first_bd_sz > 0) {
11126                                 if (unlikely(wnd_sum < lso_mss)) {
11127                                         to_copy = 1;
11128                                         goto exit_lbl;
11129                                 }
11130
11131                                 wnd_sum -= first_bd_sz;
11132                         }
11133
11134                         /* Others are easier: run through the frag list and
11135                            check all windows */
11136                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11137                                 wnd_sum +=
11138                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11139
11140                                 if (unlikely(wnd_sum < lso_mss)) {
11141                                         to_copy = 1;
11142                                         break;
11143                                 }
11144                                 wnd_sum -=
11145                                         skb_shinfo(skb)->frags[wnd_idx].size;
11146                         }
11147                 } else {
11148                         /* a non-LSO packet that is too fragmented
11149                            must always be linearized */
11150                         to_copy = 1;
11151                 }
11152         }
11153
11154 exit_lbl:
11155         if (unlikely(to_copy))
11156                 DP(NETIF_MSG_TX_QUEUED,
11157                    "Linearization IS REQUIRED for %s packet. "
11158                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11159                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11160                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11161
11162         return to_copy;
11163 }
11164 #endif
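
/* In effect the check above enforces a FW rule (as implied by the code):
 * every window of (MAX_FETCH_BD - 3) consecutive BDs of an LSO packet
 * must carry at least one full MSS of payload; the first window that
 * falls short forces skb_linearize().
 */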
11165
11166 /* called with netif_tx_lock
11167  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11168  * netif_wake_queue()
11169  */
11170 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11171 {
11172         struct bnx2x *bp = netdev_priv(dev);
11173         struct bnx2x_fastpath *fp;
11174         struct netdev_queue *txq;
11175         struct sw_tx_bd *tx_buf;
11176         struct eth_tx_start_bd *tx_start_bd;
11177         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11178         struct eth_tx_parse_bd *pbd = NULL;
11179         u16 pkt_prod, bd_prod;
11180         int nbd, fp_index;
11181         dma_addr_t mapping;
11182         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11183         int i;
11184         u8 hlen = 0;
11185         __le16 pkt_size = 0;
11186
11187 #ifdef BNX2X_STOP_ON_ERROR
11188         if (unlikely(bp->panic))
11189                 return NETDEV_TX_BUSY;
11190 #endif
11191
11192         fp_index = skb_get_queue_mapping(skb);
11193         txq = netdev_get_tx_queue(dev, fp_index);
11194
11195         fp = &bp->fp[fp_index];
11196
11197         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11198                 fp->eth_q_stats.driver_xoff++;
11199                 netif_tx_stop_queue(txq);
11200                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11201                 return NETDEV_TX_BUSY;
11202         }
11203
11204         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11205            "  gso type %x  xmit_type %x\n",
11206            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11207            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11208
11209 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11210         /* First, check if we need to linearize the skb (due to FW
11211            restrictions). No need to check fragmentation if the page size is > 8K
11212            (the FW restrictions cannot be violated) */
11213         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11214                 /* Statistics of linearization */
11215                 bp->lin_cnt++;
11216                 if (skb_linearize(skb) != 0) {
11217                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11218                            "silently dropping this SKB\n");
11219                         dev_kfree_skb_any(skb);
11220                         return NETDEV_TX_OK;
11221                 }
11222         }
11223 #endif
11224
11225         /*
11226         Please read carefully. First we use one BD which we mark as start,
11227         then we have a parsing info BD (used for TSO or xsum),
11228         and only then we have the rest of the TSO BDs.
11229         (don't forget to mark the last one as last,
11230         and to unmap only AFTER you write to the BD ...)
11231         And above all, all PBD sizes are in words - NOT DWORDS!
11232         */
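
        /* Resulting BD chain for one packet, roughly:
         *   start BD -> parse BD -> [optional TSO-split data BD] ->
         *   one BD per frag -> last BD
         * nbd counts them all; a next-page BD is added to the count
         * further below when the chain crosses a ring page.
         */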
11233
11234         pkt_prod = fp->tx_pkt_prod++;
11235         bd_prod = TX_BD(fp->tx_bd_prod);
11236
11237         /* get a tx_buf and first BD */
11238         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11239         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11240
11241         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11242         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11243                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11244         /* header nbd */
11245         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11246
11247         /* remember the first BD of the packet */
11248         tx_buf->first_bd = fp->tx_bd_prod;
11249         tx_buf->skb = skb;
11250         tx_buf->flags = 0;
11251
11252         DP(NETIF_MSG_TX_QUEUED,
11253            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11254            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11255
11256 #ifdef BCM_VLAN
11257         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11258             (bp->flags & HW_VLAN_TX_FLAG)) {
11259                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11260                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11261         } else
11262 #endif
11263                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11264
11265         /* turn on parsing and get a BD */
11266         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11267         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11268
11269         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11270
11271         if (xmit_type & XMIT_CSUM) {
11272                 hlen = (skb_network_header(skb) - skb->data) / 2;
11273
11274                 /* for now NS flag is not used in Linux */
11275                 pbd->global_data =
11276                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11277                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11278
11279                 pbd->ip_hlen = (skb_transport_header(skb) -
11280                                 skb_network_header(skb)) / 2;
11281
11282                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11283
11284                 pbd->total_hlen = cpu_to_le16(hlen);
11285                 hlen = hlen*2;
11286
11287                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11288
11289                 if (xmit_type & XMIT_CSUM_V4)
11290                         tx_start_bd->bd_flags.as_bitfield |=
11291                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11292                 else
11293                         tx_start_bd->bd_flags.as_bitfield |=
11294                                                 ETH_TX_BD_FLAGS_IPV6;
11295
11296                 if (xmit_type & XMIT_CSUM_TCP) {
11297                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11298
11299                 } else {
11300                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11301
11302                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11303
11304                         DP(NETIF_MSG_TX_QUEUED,
11305                            "hlen %d  fix %d  csum before fix %x\n",
11306                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11307
11308                         /* HW bug: fixup the CSUM */
11309                         pbd->tcp_pseudo_csum =
11310                                 bnx2x_csum_fix(skb_transport_header(skb),
11311                                                SKB_CS(skb), fix);
11312
11313                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11314                            pbd->tcp_pseudo_csum);
11315                 }
11316         }
11317
11318         mapping = pci_map_single(bp->pdev, skb->data,
11319                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11320
11321         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11322         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11323         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11324         tx_start_bd->nbd = cpu_to_le16(nbd);
11325         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11326         pkt_size = tx_start_bd->nbytes;
11327
11328         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11329            "  nbytes %d  flags %x  vlan %x\n",
11330            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11331            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11332            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11333
11334         if (xmit_type & XMIT_GSO) {
11335
11336                 DP(NETIF_MSG_TX_QUEUED,
11337                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11338                    skb->len, hlen, skb_headlen(skb),
11339                    skb_shinfo(skb)->gso_size);
11340
11341                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11342
11343                 if (unlikely(skb_headlen(skb) > hlen))
11344                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11345                                                  hlen, bd_prod, ++nbd);
11346
11347                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11348                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11349                 pbd->tcp_flags = pbd_tcp_flags(skb);
11350
11351                 if (xmit_type & XMIT_GSO_V4) {
11352                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11353                         pbd->tcp_pseudo_csum =
11354                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11355                                                           ip_hdr(skb)->daddr,
11356                                                           0, IPPROTO_TCP, 0));
11357
11358                 } else
11359                         pbd->tcp_pseudo_csum =
11360                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11361                                                         &ipv6_hdr(skb)->daddr,
11362                                                         0, IPPROTO_TCP, 0));
11363
11364                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11365         }
11366         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11367
11368         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11369                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11370
11371                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11372                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11373                 if (total_pkt_bd == NULL)
11374                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11375
11376                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11377                                        frag->size, PCI_DMA_TODEVICE);
11378
11379                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11380                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11381                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11382                 le16_add_cpu(&pkt_size, frag->size);
11383
11384                 DP(NETIF_MSG_TX_QUEUED,
11385                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11386                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11387                    le16_to_cpu(tx_data_bd->nbytes));
11388         }
11389
11390         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11391
11392         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11393
11394         /* now send a tx doorbell, counting the next-page BD
11395          * if the packet contains or ends with it
11396          */
11397         if (TX_BD_POFF(bd_prod) < nbd)
11398                 nbd++;
11399
11400         if (total_pkt_bd != NULL)
11401                 total_pkt_bd->total_pkt_bytes = pkt_size;
11402
11403         if (pbd)
11404                 DP(NETIF_MSG_TX_QUEUED,
11405                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11406                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11407                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11408                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11409                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11410
11411         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11412
11413         /*
11414          * Make sure that the BD data is updated before updating the producer
11415          * since FW might read the BD right after the producer is updated.
11416          * This is only applicable for weak-ordered memory model archs such
11417          * as IA-64. The following barrier is also mandatory since the FW
11418          * assumes packets always have BDs.
11419          */
11420         wmb();
11421
11422         fp->tx_db.data.prod += nbd;
11423         barrier();
11424         DOORBELL(bp, fp->index, fp->tx_db.raw);
11425
11426         mmiowb();
11427
11428         fp->tx_bd_prod += nbd;
11429
11430         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11431                 netif_tx_stop_queue(txq);
11432                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11433                    if we put Tx into XOFF state. */
11434                 smp_mb();
11435                 fp->eth_q_stats.driver_xoff++;
11436                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11437                         netif_tx_wake_queue(txq);
11438         }
11439         fp->tx_pkt++;
11440
11441         return NETDEV_TX_OK;
11442 }
11443
11444 /* called with rtnl_lock */
11445 static int bnx2x_open(struct net_device *dev)
11446 {
11447         struct bnx2x *bp = netdev_priv(dev);
11448
11449         netif_carrier_off(dev);
11450
11451         bnx2x_set_power_state(bp, PCI_D0);
11452
11453         return bnx2x_nic_load(bp, LOAD_OPEN);
11454 }
11455
11456 /* called with rtnl_lock */
11457 static int bnx2x_close(struct net_device *dev)
11458 {
11459         struct bnx2x *bp = netdev_priv(dev);
11460
11461         /* Unload the driver, release IRQs */
11462         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11463         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11464                 if (!CHIP_REV_IS_SLOW(bp))
11465                         bnx2x_set_power_state(bp, PCI_D3hot);
11466
11467         return 0;
11468 }
11469
11470 /* called with netif_tx_lock from dev_mcast.c */
11471 static void bnx2x_set_rx_mode(struct net_device *dev)
11472 {
11473         struct bnx2x *bp = netdev_priv(dev);
11474         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11475         int port = BP_PORT(bp);
11476
11477         if (bp->state != BNX2X_STATE_OPEN) {
11478                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11479                 return;
11480         }
11481
11482         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11483
11484         if (dev->flags & IFF_PROMISC)
11485                 rx_mode = BNX2X_RX_MODE_PROMISC;
11486
11487         else if ((dev->flags & IFF_ALLMULTI) ||
11488                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11489                   CHIP_IS_E1(bp)))
11490                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11491
11492         else { /* some multicasts */
11493                 if (CHIP_IS_E1(bp)) {
11494                         int i, old, offset;
11495                         struct dev_mc_list *mclist;
11496                         struct mac_configuration_cmd *config =
11497                                                 bnx2x_sp(bp, mcast_config);
11498
11499                         i = 0;
11500                         netdev_for_each_mc_addr(mclist, dev) {
11501                                 config->config_table[i].
11502                                         cam_entry.msb_mac_addr =
11503                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11504                                 config->config_table[i].
11505                                         cam_entry.middle_mac_addr =
11506                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11507                                 config->config_table[i].
11508                                         cam_entry.lsb_mac_addr =
11509                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11510                                 config->config_table[i].cam_entry.flags =
11511                                                         cpu_to_le16(port);
11512                                 config->config_table[i].
11513                                         target_table_entry.flags = 0;
11514                                 config->config_table[i].target_table_entry.
11515                                         clients_bit_vector =
11516                                                 cpu_to_le32(1 << BP_L_ID(bp));
11517                                 config->config_table[i].
11518                                         target_table_entry.vlan_id = 0;
11519
11520                                 DP(NETIF_MSG_IFUP,
11521                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11522                                    config->config_table[i].
11523                                                 cam_entry.msb_mac_addr,
11524                                    config->config_table[i].
11525                                                 cam_entry.middle_mac_addr,
11526                                    config->config_table[i].
11527                                                 cam_entry.lsb_mac_addr);
11528                                 i++;
11529                         }
11530                         old = config->hdr.length;
11531                         if (old > i) {
11532                                 for (; i < old; i++) {
11533                                         if (CAM_IS_INVALID(config->
11534                                                            config_table[i])) {
11535                                                 /* already invalidated */
11536                                                 break;
11537                                         }
11538                                         /* invalidate */
11539                                         CAM_INVALIDATE(config->
11540                                                        config_table[i]);
11541                                 }
11542                         }
11543
11544                         if (CHIP_REV_IS_SLOW(bp))
11545                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11546                         else
11547                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11548
11549                         config->hdr.length = i;
11550                         config->hdr.offset = offset;
11551                         config->hdr.client_id = bp->fp->cl_id;
11552                         config->hdr.reserved1 = 0;
11553
11554                         bp->set_mac_pending++;
11555                         smp_wmb();
11556
11557                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11558                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11559                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11560                                       0);
11561                 } else { /* E1H */
11562                         /* Accept one or more multicasts */
11563                         struct dev_mc_list *mclist;
11564                         u32 mc_filter[MC_HASH_SIZE];
11565                         u32 crc, bit, regidx;
11566                         int i;
11567
11568                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11569
11570                         netdev_for_each_mc_addr(mclist, dev) {
11571                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11572                                    mclist->dmi_addr);
11573
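                                /* the top byte of the CRC32C picks one of
                                 * 256 filter bits spread across the eight
                                 * 32-bit MC hash registers
                                 */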
11574                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11575                                 bit = (crc >> 24) & 0xff;
11576                                 regidx = bit >> 5;
11577                                 bit &= 0x1f;
11578                                 mc_filter[regidx] |= (1 << bit);
11579                         }
11580
11581                         for (i = 0; i < MC_HASH_SIZE; i++)
11582                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11583                                        mc_filter[i]);
11584                 }
11585         }
11586
11587         bp->rx_mode = rx_mode;
11588         bnx2x_set_storm_rx_mode(bp);
11589 }
11590
11591 /* called with rtnl_lock */
11592 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11593 {
11594         struct sockaddr *addr = p;
11595         struct bnx2x *bp = netdev_priv(dev);
11596
11597         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11598                 return -EINVAL;
11599
11600         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11601         if (netif_running(dev)) {
11602                 if (CHIP_IS_E1(bp))
11603                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11604                 else
11605                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11606         }
11607
11608         return 0;
11609 }
11610
11611 /* called with rtnl_lock */
11612 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11613                            int devad, u16 addr)
11614 {
11615         struct bnx2x *bp = netdev_priv(netdev);
11616         u16 value;
11617         int rc;
11618         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11619
11620         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11621            prtad, devad, addr);
11622
11623         if (prtad != bp->mdio.prtad) {
11624                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11625                    prtad, bp->mdio.prtad);
11626                 return -EINVAL;
11627         }
11628
11629         /* The HW expects different devad if CL22 is used */
11630         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11631
11632         bnx2x_acquire_phy_lock(bp);
11633         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11634                              devad, addr, &value);
11635         bnx2x_release_phy_lock(bp);
11636         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11637
11638         if (!rc)
11639                 rc = value;
11640         return rc;
11641 }
11642
11643 /* called with rtnl_lock */
11644 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11645                             u16 addr, u16 value)
11646 {
11647         struct bnx2x *bp = netdev_priv(netdev);
11648         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11649         int rc;
11650
11651         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11652                            " value 0x%x\n", prtad, devad, addr, value);
11653
11654         if (prtad != bp->mdio.prtad) {
11655                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11656                    prtad, bp->mdio.prtad);
11657                 return -EINVAL;
11658         }
11659
11660         /* The HW expects different devad if CL22 is used */
11661         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11662
11663         bnx2x_acquire_phy_lock(bp);
11664         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11665                               devad, addr, value);
11666         bnx2x_release_phy_lock(bp);
11667         return rc;
11668 }
11669
11670 /* called with rtnl_lock */
11671 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11672 {
11673         struct bnx2x *bp = netdev_priv(dev);
11674         struct mii_ioctl_data *mdio = if_mii(ifr);
11675
11676         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11677            mdio->phy_id, mdio->reg_num, mdio->val_in);
11678
11679         if (!netif_running(dev))
11680                 return -EAGAIN;
11681
11682         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11683 }
11684
11685 /* called with rtnl_lock */
11686 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11687 {
11688         struct bnx2x *bp = netdev_priv(dev);
11689         int rc = 0;
11690
11691         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11692             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11693                 return -EINVAL;
11694
11695         /* This does not race with packet allocation
11696          * because the actual alloc size is
11697          * only updated as part of load
11698          */
11699         dev->mtu = new_mtu;
11700
11701         if (netif_running(dev)) {
11702                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11703                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11704         }
11705
11706         return rc;
11707 }
11708
11709 static void bnx2x_tx_timeout(struct net_device *dev)
11710 {
11711         struct bnx2x *bp = netdev_priv(dev);
11712
11713 #ifdef BNX2X_STOP_ON_ERROR
11714         if (!bp->panic)
11715                 bnx2x_panic();
11716 #endif
11717         /* This allows the netif to be shut down gracefully before resetting */
11718         schedule_work(&bp->reset_task);
11719 }
11720
11721 #ifdef BCM_VLAN
11722 /* called with rtnl_lock */
11723 static void bnx2x_vlan_rx_register(struct net_device *dev,
11724                                    struct vlan_group *vlgrp)
11725 {
11726         struct bnx2x *bp = netdev_priv(dev);
11727
11728         bp->vlgrp = vlgrp;
11729
11730         /* Set flags according to the required capabilities */
11731         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11732
11733         if (dev->features & NETIF_F_HW_VLAN_TX)
11734                 bp->flags |= HW_VLAN_TX_FLAG;
11735
11736         if (dev->features & NETIF_F_HW_VLAN_RX)
11737                 bp->flags |= HW_VLAN_RX_FLAG;
11738
11739         if (netif_running(dev))
11740                 bnx2x_set_client_config(bp);
11741 }
11742
11743 #endif
11744
11745 #ifdef CONFIG_NET_POLL_CONTROLLER
11746 static void poll_bnx2x(struct net_device *dev)
11747 {
11748         struct bnx2x *bp = netdev_priv(dev);
11749
11750         disable_irq(bp->pdev->irq);
11751         bnx2x_interrupt(bp->pdev->irq, dev);
11752         enable_irq(bp->pdev->irq);
11753 }
11754 #endif
11755
11756 static const struct net_device_ops bnx2x_netdev_ops = {
11757         .ndo_open               = bnx2x_open,
11758         .ndo_stop               = bnx2x_close,
11759         .ndo_start_xmit         = bnx2x_start_xmit,
11760         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11761         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11762         .ndo_validate_addr      = eth_validate_addr,
11763         .ndo_do_ioctl           = bnx2x_ioctl,
11764         .ndo_change_mtu         = bnx2x_change_mtu,
11765         .ndo_tx_timeout         = bnx2x_tx_timeout,
11766 #ifdef BCM_VLAN
11767         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11768 #endif
11769 #ifdef CONFIG_NET_POLL_CONTROLLER
11770         .ndo_poll_controller    = poll_bnx2x,
11771 #endif
11772 };
11773
11774 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11775                                     struct net_device *dev)
11776 {
11777         struct bnx2x *bp;
11778         int rc;
11779
11780         SET_NETDEV_DEV(dev, &pdev->dev);
11781         bp = netdev_priv(dev);
11782
11783         bp->dev = dev;
11784         bp->pdev = pdev;
11785         bp->flags = 0;
11786         bp->func = PCI_FUNC(pdev->devfn);
11787
11788         rc = pci_enable_device(pdev);
11789         if (rc) {
11790                 pr_err("Cannot enable PCI device, aborting\n");
11791                 goto err_out;
11792         }
11793
11794         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11795                 pr_err("Cannot find PCI device base address, aborting\n");
11796                 rc = -ENODEV;
11797                 goto err_out_disable;
11798         }
11799
11800         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11801                 pr_err("Cannot find second PCI device base address, aborting\n");
11802                 rc = -ENODEV;
11803                 goto err_out_disable;
11804         }
11805
11806         if (atomic_read(&pdev->enable_cnt) == 1) {
11807                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11808                 if (rc) {
11809                         pr_err("Cannot obtain PCI resources, aborting\n");
11810                         goto err_out_disable;
11811                 }
11812
11813                 pci_set_master(pdev);
11814                 pci_save_state(pdev);
11815         }
11816
11817         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11818         if (bp->pm_cap == 0) {
11819                 pr_err("Cannot find power management capability, aborting\n");
11820                 rc = -EIO;
11821                 goto err_out_release;
11822         }
11823
11824         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11825         if (bp->pcie_cap == 0) {
11826                 pr_err("Cannot find PCI Express capability, aborting\n");
11827                 rc = -EIO;
11828                 goto err_out_release;
11829         }
11830
11831         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11832                 bp->flags |= USING_DAC_FLAG;
11833                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11834                         pr_err("pci_set_consistent_dma_mask failed, aborting\n");
11835                         rc = -EIO;
11836                         goto err_out_release;
11837                 }
11838
11839         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11840                 pr_err("System does not support DMA, aborting\n");
11841                 rc = -EIO;
11842                 goto err_out_release;
11843         }
11844
11845         dev->mem_start = pci_resource_start(pdev, 0);
11846         dev->base_addr = dev->mem_start;
11847         dev->mem_end = pci_resource_end(pdev, 0);
11848
11849         dev->irq = pdev->irq;
11850
11851         bp->regview = pci_ioremap_bar(pdev, 0);
11852         if (!bp->regview) {
11853                 pr_err("Cannot map register space, aborting\n");
11854                 rc = -ENOMEM;
11855                 goto err_out_release;
11856         }
11857
11858         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11859                                         min_t(u64, BNX2X_DB_SIZE,
11860                                               pci_resource_len(pdev, 2)));
11861         if (!bp->doorbells) {
11862                 pr_err("Cannot map doorbell space, aborting\n");
11863                 rc = -ENOMEM;
11864                 goto err_out_unmap;
11865         }
11866
11867         bnx2x_set_power_state(bp, PCI_D0);
11868
11869         /* clean indirect addresses */
11870         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11871                                PCICFG_VENDOR_ID_OFFSET);
11872         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11873         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11874         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11875         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11876
11877         dev->watchdog_timeo = TX_TIMEOUT;
11878
11879         dev->netdev_ops = &bnx2x_netdev_ops;
11880         dev->ethtool_ops = &bnx2x_ethtool_ops;
11881         dev->features |= NETIF_F_SG;
11882         dev->features |= NETIF_F_HW_CSUM;
11883         if (bp->flags & USING_DAC_FLAG)
11884                 dev->features |= NETIF_F_HIGHDMA;
11885         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11886         dev->features |= NETIF_F_TSO6;
11887 #ifdef BCM_VLAN
11888         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11889         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11890
11891         dev->vlan_features |= NETIF_F_SG;
11892         dev->vlan_features |= NETIF_F_HW_CSUM;
11893         if (bp->flags & USING_DAC_FLAG)
11894                 dev->vlan_features |= NETIF_F_HIGHDMA;
11895         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11896         dev->vlan_features |= NETIF_F_TSO6;
11897 #endif
11898
11899         /* get_port_hwinfo() will set prtad and mmds properly */
11900         bp->mdio.prtad = MDIO_PRTAD_NONE;
11901         bp->mdio.mmds = 0;
11902         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11903         bp->mdio.dev = dev;
11904         bp->mdio.mdio_read = bnx2x_mdio_read;
11905         bp->mdio.mdio_write = bnx2x_mdio_write;
11906
11907         return 0;
11908
11909 err_out_unmap:
11910         if (bp->regview) {
11911                 iounmap(bp->regview);
11912                 bp->regview = NULL;
11913         }
11914         if (bp->doorbells) {
11915                 iounmap(bp->doorbells);
11916                 bp->doorbells = NULL;
11917         }
11918
11919 err_out_release:
11920         if (atomic_read(&pdev->enable_cnt) == 1)
11921                 pci_release_regions(pdev);
11922
11923 err_out_disable:
11924         pci_disable_device(pdev);
11925         pci_set_drvdata(pdev, NULL);
11926
11927 err_out:
11928         return rc;
11929 }
11930
11931 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11932                                                  int *width, int *speed)
11933 {
11934         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11935
11936         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11937
11938         /* return value of 1=2.5GT/s (Gen1), 2=5GT/s (Gen2) */
11939         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11940 }
11941
11942 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11943 {
11944         const struct firmware *firmware = bp->firmware;
11945         struct bnx2x_fw_file_hdr *fw_hdr;
11946         struct bnx2x_fw_file_section *sections;
11947         u32 offset, len, num_ops;
11948         u16 *ops_offsets;
11949         int i;
11950         const u8 *fw_ver;
11951
11952         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11953                 return -EINVAL;
11954
11955         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11956         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11957
11958         /* Make sure none of the offsets and sizes make us read beyond
11959          * the end of the firmware data */
11960         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11961                 offset = be32_to_cpu(sections[i].offset);
11962                 len = be32_to_cpu(sections[i].len);
11963                 if (offset + len > firmware->size) {
11964                         pr_err("Section %d length is out of bounds\n", i);
11965                         return -EINVAL;
11966                 }
11967         }
11968
11969         /* Likewise for the init_ops offsets */
11970         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11971         ops_offsets = (u16 *)(firmware->data + offset);
11972         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11973
11974         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11975                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11976                         pr_err("Section offset %d is out of bounds\n", i);
11977                         return -EINVAL;
11978                 }
11979         }
11980
11981         /* Check FW version */
11982         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11983         fw_ver = firmware->data + offset;
11984         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11985             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11986             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11987             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11988                 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11989                        fw_ver[0], fw_ver[1], fw_ver[2],
11990                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11991                        BCM_5710_FW_MINOR_VERSION,
11992                        BCM_5710_FW_REVISION_VERSION,
11993                        BCM_5710_FW_ENGINEERING_VERSION);
11994                 return -EINVAL;
11995         }
11996
11997         return 0;
11998 }
11999
12000 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12001 {
12002         const __be32 *source = (const __be32 *)_source;
12003         u32 *target = (u32 *)_target;
12004         u32 i;
12005
12006         for (i = 0; i < n/4; i++)
12007                 target[i] = be32_to_cpu(source[i]);
12008 }
12009
12010 /*
12011    Ops array is stored in the following format:
12012    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12013  */
12014 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12015 {
12016         const __be32 *source = (const __be32 *)_source;
12017         struct raw_op *target = (struct raw_op *)_target;
12018         u32 i, j, tmp;
12019
12020         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12021                 tmp = be32_to_cpu(source[j]);
12022                 target[i].op = (tmp >> 24) & 0xff;
12023                 target[i].offset =  tmp & 0xffffff;
12024                 target[i].raw_data = be32_to_cpu(source[j+1]);
12025         }
12026 }
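
/* Decoding example (made-up bytes): the big-endian word pair
 * 0x0100000c, 0x0000002a becomes op = 0x01, offset = 0x00000c,
 * raw_data = 0x0000002a.
 */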
12027
12028 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12029 {
12030         const __be16 *source = (const __be16 *)_source;
12031         u16 *target = (u16 *)_target;
12032         u32 i;
12033
12034         for (i = 0; i < n/2; i++)
12035                 target[i] = be16_to_cpu(source[i]);
12036 }
12037
12038 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
12039 do {                                                                    \
12040         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
12041         bp->arr = kmalloc(len, GFP_KERNEL);                             \
12042         if (!bp->arr) {                                                 \
12043                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
12044                 goto lbl;                                               \
12045         }                                                               \
12046         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
12047              (u8 *)bp->arr, len);                                       \
12048 } while (0)
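
/* Usage sketch (mirrors the calls below): BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) allocates bp->init_data sized by
 * the fw header, copies the init_data section out of the firmware image
 * converting it from big endian, and jumps to the label on allocation
 * failure.
 */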
12049
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
        const char *fw_file_name;
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc;

        if (CHIP_IS_E1(bp))
                fw_file_name = FW_FILE_NAME_E1;
        else
                fw_file_name = FW_FILE_NAME_E1H;

        pr_info("Loading %s\n", fw_file_name);

        rc = request_firmware(&bp->firmware, fw_file_name, dev);
        if (rc) {
                pr_err("Can't load firmware file %s\n", fw_file_name);
                goto request_firmware_exit;
        }

        rc = bnx2x_check_firmware(bp);
        if (rc) {
                pr_err("Corrupt firmware file %s\n", fw_file_name);
                goto request_firmware_exit;
        }

        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

        /* Initialize the pointers to the init arrays */
        /* Blob */
        BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

        /* Opcodes */
        BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

        /* Offsets */
        BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
                            be16_to_cpu_n);

        /* STORMs firmware */
        INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
        INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_pram_data.offset);
        INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_int_table_data.offset);
        INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_pram_data.offset);
        INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
        INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_pram_data.offset);
        INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_int_table_data.offset);
        INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_pram_data.offset);

        return 0;

init_offsets_alloc_err:
        kfree(bp->init_ops);
init_ops_alloc_err:
        kfree(bp->init_data);
request_firmware_exit:
        release_firmware(bp->firmware);

        return rc;
}

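/**
 * bnx2x_init_one - called when a new PCI device is attached
 * @pdev: Pointer to PCI device
 * @ent: Matching entry in bnx2x_pci_tbl
 *
 * Allocates the net device, sets up the PCI and driver state, loads the
 * firmware init arrays and registers the device with the network stack.
 */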
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc;

        /* dev is zeroed by alloc_etherdev_mq() */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                pr_err("Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msg_enable = debug;

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        /* Set init arrays */
        rc = bnx2x_init_firmware(bp, &pdev->dev);
        if (rc) {
                pr_err("Error loading firmware\n");
                goto init_one_exit;
        }

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
        netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
                    board_info[ent->driver_data].name,
                    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
                    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
                    dev->base_addr, bp->pdev->irq, dev->dev_addr);

        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}

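/**
 * bnx2x_remove_one - called when the PCI device is removed
 * @pdev: Pointer to PCI device
 *
 * Unregisters the net device and frees the firmware arrays, mappings and
 * PCI resources acquired in bnx2x_init_one.
 */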
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                pr_err("BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

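/**
 * bnx2x_suspend - called when the system is going to sleep
 * @pdev: Pointer to PCI device
 * @state: Target power state
 *
 * Detaches a running interface, unloads the NIC and drops the device to
 * the power state chosen by the PCI core.
 */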
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                pr_err("BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

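/**
 * bnx2x_resume - called when the system wakes up
 * @pdev: Pointer to PCI device
 *
 * Restores PCI state, brings the device back to D0 and reloads the NIC
 * if the interface was running when the system was suspended.
 */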
static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                pr_err("BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}

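/*
 * Minimal unload path used after a fatal PCI error: the hardware may no
 * longer be accessible, so only driver-side resources are torn down.
 */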
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

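/*
 * Re-read the shared memory base and MCP state after a slot reset, so
 * that firmware communication can resume before the NIC is reloaded.
 */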
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        int ret;

        pr_info("%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                pr_err("Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                pr_err("Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}

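/*
 * Queue up to count kwqes from CNIC on the driver's ring; entries that do
 * not fit are left to the caller.  Returns the number of kwqes accepted.
 */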
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

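/*
 * Control entry point exposed to CNIC; dispatches on ctl->cmd to write
 * context table entries, post completions or toggle the L2 rx mode for a
 * client.
 */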
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

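/*
 * Describe to CNIC which status blocks and (when MSI-X is active) which
 * vector it should use; irq_arr[0] carries the CNIC status block and
 * irq_arr[1] the default one.
 */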
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

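/*
 * Called by CNIC to attach itself: allocates the kwqe ring, initializes
 * the CNIC status block and iSCSI MAC, then publishes ops with
 * rcu_assign_pointer so the fast path can start calling back.
 */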
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

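/**
 * bnx2x_cnic_probe - fill in the cnic_eth_dev descriptor for this device
 * @dev: net device CNIC is probing
 *
 * Exported so the CNIC module can discover the register windows, context
 * table layout and callbacks it needs for iSCSI offload.
 */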
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */
