/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.105-1"
#define DRV_MODULE_RELDATE      "2009/04/22"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
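
/*
 * Illustrative note (not in the original sources): before the DMAE engine
 * is ready, GRC registers are accessed through the PCI config window
 * above, e.g.
 *
 *      bnx2x_reg_wr_ind(bp, addr, val);
 *      val = bnx2x_reg_rd_ind(bp, addr);
 *
 * Each helper programs PCICFG_GRC_ADDRESS, moves the data through
 * PCICFG_GRC_DATA, and then parks the window back at
 * PCICFG_VENDOR_ID_OFFSET so that a stray config cycle cannot touch
 * device memory.
 */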

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
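
/*
 * Minimal usage sketch (illustrative, assuming a probed "bp" and a GRC
 * destination "reg"): copy two dwords from the DMA-coherent slowpath
 * buffer into GRC space.
 *
 *      bp->slowpath->wb_data[0] = val_hi;
 *      bp->slowpath->wb_data[1] = val_lo;
 *      bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 *
 * This is the pattern that the REG_WR_DMAE() wrapper used by
 * bnx2x_wb_wr() below builds on.
 */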

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}
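
/*
 * Example (illustrative): a 64-bit "wide bus" register pair is written
 * through the DMAE as one two-dword transaction,
 *
 *      bnx2x_wb_wr(bp, reg, val_hi, val_lo);
 *
 * rather than as two separate REG_WR() accesses.
 */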

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}
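
/*
 * Note: bp->intr_sem is checked at the top of the interrupt handlers, so
 * bumping it here turns any interrupt that slips in before
 * bnx2x_int_disable() into a no-op; the synchronize_irq() calls then wait
 * out handlers that are already in flight.
 */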

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
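
/*
 * Illustrative call (assuming the usual fastpath fields; not taken from
 * this file): acknowledging a fastpath status block and re-enabling its
 * IGU line would look like
 *
 *      bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *                   le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */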

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
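
/*
 * Worked example (illustrative): when prod == cons, "used" evaluates to
 * NUM_TX_RINGS, i.e. only the reserved next-page BDs are counted as
 * consumed, and bnx2x_tx_avail() reports tx_ring_size - NUM_TX_RINGS
 * usable descriptors.
 */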

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in each page:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
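
/*
 * Illustrative summary of the mask lifecycle: each u64 element of
 * fp->sge_mask covers RX_SGE_MASK_ELEM_SZ SGEs.  bnx2x_update_sge_prod()
 * clears a bit when the FW reports the corresponding SGE consumed and
 * refills a fully-zero element with RX_SGE_MASK_ELEM_ONE_MASK as the
 * producer sweeps past it; the two "next page" slots of every ring page
 * are cleared above so that they never keep an element non-zero and thus
 * never stall the sweep.
 */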

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

1429          * Make sure that the BD and SGE data is updated before updating the
1430          * producers since FW might read the BD/SGE right after the producer
1431          * is updated.
1432          * This is only applicable to weak-ordered memory model archs such
1433          * as IA-64. The following barrier is also mandatory since the FW
1434          * assumes that BDs always have buffers.
1435          */
1436         wmb();
1437
1438         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1439                 REG_WR(bp, BAR_USTRORM_INTMEM +
1440                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1441                        ((u32 *)&rx_prods)[i]);
1442
1443         mmiowb(); /* keep prod updates ordered */
1444
1445         DP(NETIF_MSG_RX_STATUS,
1446            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1447            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1448 }
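
/* A rough sketch of the ordering contract enforced above (sizes are
 * illustrative; the real layout of struct ustorm_eth_rx_producers comes
 * from the FW HSI):
 *
 *   fill BDs/SGEs  ->  wmb()  ->  write producer words  ->  mmiowb()
 *
 * With an 8-byte producers structure, say, the copy loop issues two
 * 32-bit REG_WR()s, one per word.
 */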
1449
1450 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1451 {
1452         struct bnx2x *bp = fp->bp;
1453         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1454         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1455         int rx_pkt = 0;
1456
1457 #ifdef BNX2X_STOP_ON_ERROR
1458         if (unlikely(bp->panic))
1459                 return 0;
1460 #endif
1461
1462         /* The CQ "next element" is the same size as a regular element,
1463            which is why it is safe to consume it here */
1464         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1465         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1466                 hw_comp_cons++;
1467
1468         bd_cons = fp->rx_bd_cons;
1469         bd_prod = fp->rx_bd_prod;
1470         bd_prod_fw = bd_prod;
1471         sw_comp_cons = fp->rx_comp_cons;
1472         sw_comp_prod = fp->rx_comp_prod;
1473
1474         /* Memory barrier necessary as speculative reads of the rx
1475          * buffer can be ahead of the index in the status block
1476          */
1477         rmb();
1478
1479         DP(NETIF_MSG_RX_STATUS,
1480            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1481            fp->index, hw_comp_cons, sw_comp_cons);
1482
1483         while (sw_comp_cons != hw_comp_cons) {
1484                 struct sw_rx_bd *rx_buf = NULL;
1485                 struct sk_buff *skb;
1486                 union eth_rx_cqe *cqe;
1487                 u8 cqe_fp_flags;
1488                 u16 len, pad;
1489
1490                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1491                 bd_prod = RX_BD(bd_prod);
1492                 bd_cons = RX_BD(bd_cons);
1493
1494                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1495                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1496
1497                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1498                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1499                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1500                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1501                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1502                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1503
1504                 /* is this a slowpath msg? */
1505                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1506                         bnx2x_sp_event(fp, cqe);
1507                         goto next_cqe;
1508
1509                 /* this is an rx packet */
1510                 } else {
1511                         rx_buf = &fp->rx_buf_ring[bd_cons];
1512                         skb = rx_buf->skb;
1513                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1514                         pad = cqe->fast_path_cqe.placement_offset;
1515
1516                         /* If the CQE is marked as both TPA_START and
1517                            TPA_END, it is a non-TPA CQE */
1518                         if ((!fp->disable_tpa) &&
1519                             (TPA_TYPE(cqe_fp_flags) !=
1520                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1521                                 u16 queue = cqe->fast_path_cqe.queue_index;
1522
1523                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1524                                         DP(NETIF_MSG_RX_STATUS,
1525                                            "calling tpa_start on queue %d\n",
1526                                            queue);
1527
1528                                         bnx2x_tpa_start(fp, queue, skb,
1529                                                         bd_cons, bd_prod);
1530                                         goto next_rx;
1531                                 }
1532
1533                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1534                                         DP(NETIF_MSG_RX_STATUS,
1535                                            "calling tpa_stop on queue %d\n",
1536                                            queue);
1537
1538                                         if (!BNX2X_RX_SUM_FIX(cqe))
1539                                                 BNX2X_ERR("STOP on non-TCP "
1540                                                           "data\n");
1541
1542                                         /* This is the size of the linear data
1543                                            on this skb */
1544                                         len = le16_to_cpu(cqe->fast_path_cqe.
1545                                                                 len_on_bd);
1546                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1547                                                     len, cqe, comp_ring_cons);
1548 #ifdef BNX2X_STOP_ON_ERROR
1549                                         if (bp->panic)
1550                                                 return 0;
1551 #endif
1552
1553                                         bnx2x_update_sge_prod(fp,
1554                                                         &cqe->fast_path_cqe);
1555                                         goto next_cqe;
1556                                 }
1557                         }
1558
1559                         pci_dma_sync_single_for_device(bp->pdev,
1560                                         pci_unmap_addr(rx_buf, mapping),
1561                                                        pad + RX_COPY_THRESH,
1562                                                        PCI_DMA_FROMDEVICE);
1563                         prefetch(skb);
1564                         prefetch(((char *)(skb)) + 128);
1565
1566                         /* is this an error packet? */
1567                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1568                                 DP(NETIF_MSG_RX_ERR,
1569                                    "ERROR  flags %x  rx packet %u\n",
1570                                    cqe_fp_flags, sw_comp_cons);
1571                                 fp->eth_q_stats.rx_err_discard_pkt++;
1572                                 goto reuse_rx;
1573                         }
1574
1575                         /* Since we don't have a jumbo ring,
1576                          * copy small packets if mtu > 1500
1577                          */
1578                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1579                             (len <= RX_COPY_THRESH)) {
1580                                 struct sk_buff *new_skb;
1581
1582                                 new_skb = netdev_alloc_skb(bp->dev,
1583                                                            len + pad);
1584                                 if (new_skb == NULL) {
1585                                         DP(NETIF_MSG_RX_ERR,
1586                                            "ERROR  packet dropped "
1587                                            "because of alloc failure\n");
1588                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1589                                         goto reuse_rx;
1590                                 }
1591
1592                                 /* aligned copy */
1593                                 skb_copy_from_linear_data_offset(skb, pad,
1594                                                     new_skb->data + pad, len);
1595                                 skb_reserve(new_skb, pad);
1596                                 skb_put(new_skb, len);
1597
1598                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1599
1600                                 skb = new_skb;
1601
1602                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1603                                 pci_unmap_single(bp->pdev,
1604                                         pci_unmap_addr(rx_buf, mapping),
1605                                                  bp->rx_buf_size,
1606                                                  PCI_DMA_FROMDEVICE);
1607                                 skb_reserve(skb, pad);
1608                                 skb_put(skb, len);
1609
1610                         } else {
1611                                 DP(NETIF_MSG_RX_ERR,
1612                                    "ERROR  packet dropped because "
1613                                    "of alloc failure\n");
1614                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1615 reuse_rx:
1616                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1617                                 goto next_rx;
1618                         }
1619
1620                         skb->protocol = eth_type_trans(skb, bp->dev);
1621
1622                         skb->ip_summed = CHECKSUM_NONE;
1623                         if (bp->rx_csum) {
1624                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1625                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1626                                 else
1627                                         fp->eth_q_stats.hw_csum_err++;
1628                         }
1629                 }
1630
1631                 skb_record_rx_queue(skb, fp->index);
1632 #ifdef BCM_VLAN
1633                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1634                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1635                      PARSING_FLAGS_VLAN))
1636                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1637                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1638                 else
1639 #endif
1640                         netif_receive_skb(skb);
1641
1642
1643 next_rx:
1644                 rx_buf->skb = NULL;
1645
1646                 bd_cons = NEXT_RX_IDX(bd_cons);
1647                 bd_prod = NEXT_RX_IDX(bd_prod);
1648                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1649                 rx_pkt++;
1650 next_cqe:
1651                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1652                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1653
1654                 if (rx_pkt == budget)
1655                         break;
1656         } /* while */
1657
1658         fp->rx_bd_cons = bd_cons;
1659         fp->rx_bd_prod = bd_prod_fw;
1660         fp->rx_comp_cons = sw_comp_cons;
1661         fp->rx_comp_prod = sw_comp_prod;
1662
1663         /* Update producers */
1664         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1665                              fp->rx_sge_prod);
1666
1667         fp->rx_pkt += rx_pkt;
1668         fp->rx_calls++;
1669
1670         return rx_pkt;
1671 }
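
/* Usage sketch (assumed NAPI wiring, simplified): the poll handler is
 * expected to call bnx2x_rx_int() with its budget and complete NAPI only
 * once the ring is drained, along the lines of:
 *
 *      work_done = bnx2x_rx_int(fp, budget);
 *      if (work_done < budget) {
 *              napi_complete(napi);
 *              ... re-enable the status block interrupt ...
 *      }
 *      return work_done;
 */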
1672
1673 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1674 {
1675         struct bnx2x_fastpath *fp = fp_cookie;
1676         struct bnx2x *bp = fp->bp;
1677         int index = fp->index;
1678
1679         /* Return here if interrupt is disabled */
1680         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1681                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1682                 return IRQ_HANDLED;
1683         }
1684
1685         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1686            index, fp->sb_id);
1687         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1688
1689 #ifdef BNX2X_STOP_ON_ERROR
1690         if (unlikely(bp->panic))
1691                 return IRQ_HANDLED;
1692 #endif
1693
1694         prefetch(fp->rx_cons_sb);
1695         prefetch(fp->tx_cons_sb);
1696         prefetch(&fp->status_blk->c_status_block.status_block_index);
1697         prefetch(&fp->status_blk->u_status_block.status_block_index);
1698
1699         napi_schedule(&bnx2x_fp(bp, index, napi));
1700
1701         return IRQ_HANDLED;
1702 }
1703
1704 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1705 {
1706         struct bnx2x *bp = netdev_priv(dev_instance);
1707         u16 status = bnx2x_ack_int(bp);
1708         u16 mask;
1709
1710         /* Return here if interrupt is shared and it's not for us */
1711         if (unlikely(status == 0)) {
1712                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1713                 return IRQ_NONE;
1714         }
1715         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1716
1717         /* Return here if interrupt is disabled */
1718         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1719                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1720                 return IRQ_HANDLED;
1721         }
1722
1723 #ifdef BNX2X_STOP_ON_ERROR
1724         if (unlikely(bp->panic))
1725                 return IRQ_HANDLED;
1726 #endif
1727
1728         mask = 0x2 << bp->fp[0].sb_id;
1729         if (status & mask) {
1730                 struct bnx2x_fastpath *fp = &bp->fp[0];
1731
1732                 prefetch(fp->rx_cons_sb);
1733                 prefetch(fp->tx_cons_sb);
1734                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1735                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1736
1737                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1738
1739                 status &= ~mask;
1740         }
1741
1742
1743         if (unlikely(status & 0x1)) {
1744                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1745
1746                 status &= ~0x1;
1747                 if (!status)
1748                         return IRQ_HANDLED;
1749         }
1750
1751         if (status)
1752                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1753                    status);
1754
1755         return IRQ_HANDLED;
1756 }
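
/* INTA status word layout, as decoded above: bit 0 belongs to the
 * default (slowpath) status block, while fastpath status block n maps
 * to bit (n + 1) via the 0x2 << sb_id mask.  Any bits still set after
 * both checks are unexpected and are only logged.
 */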
1757
1758 /* end of fast path */
1759
1760 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1761
1762 /* Link */
1763
1764 /*
1765  * General service functions
1766  */
1767
1768 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1769 {
1770         u32 lock_status;
1771         u32 resource_bit = (1 << resource);
1772         int func = BP_FUNC(bp);
1773         u32 hw_lock_control_reg;
1774         int cnt;
1775
1776         /* Validating that the resource is within range */
1777         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1778                 DP(NETIF_MSG_HW,
1779                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1780                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1781                 return -EINVAL;
1782         }
1783
1784         if (func <= 5) {
1785                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1786         } else {
1787                 hw_lock_control_reg =
1788                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1789         }
1790
1791         /* Validating that the resource is not already taken */
1792         lock_status = REG_RD(bp, hw_lock_control_reg);
1793         if (lock_status & resource_bit) {
1794                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1795                    lock_status, resource_bit);
1796                 return -EEXIST;
1797         }
1798
1799         /* Try for 5 seconds, polling every 5 ms */
1800         for (cnt = 0; cnt < 1000; cnt++) {
1801                 /* Try to acquire the lock */
1802                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1803                 lock_status = REG_RD(bp, hw_lock_control_reg);
1804                 if (lock_status & resource_bit)
1805                         return 0;
1806
1807                 msleep(5);
1808         }
1809         DP(NETIF_MSG_HW, "Timeout\n");
1810         return -EAGAIN;
1811 }
1812
1813 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1814 {
1815         u32 lock_status;
1816         u32 resource_bit = (1 << resource);
1817         int func = BP_FUNC(bp);
1818         u32 hw_lock_control_reg;
1819
1820         /* Validating that the resource is within range */
1821         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1822                 DP(NETIF_MSG_HW,
1823                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1824                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1825                 return -EINVAL;
1826         }
1827
1828         if (func <= 5) {
1829                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1830         } else {
1831                 hw_lock_control_reg =
1832                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1833         }
1834
1835         /* Validating that the resource is currently taken */
1836         lock_status = REG_RD(bp, hw_lock_control_reg);
1837         if (!(lock_status & resource_bit)) {
1838                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1839                    lock_status, resource_bit);
1840                 return -EFAULT;
1841         }
1842
1843         REG_WR(bp, hw_lock_control_reg, resource_bit);
1844         return 0;
1845 }
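
/* The HW lock protocol used by the acquire/release pair above: writing
 * the resource bit to hw_lock_control_reg + 4 attempts to take the lock
 * (the bit reads back as set from hw_lock_control_reg on success), and
 * writing the same bit to hw_lock_control_reg itself releases it.
 */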
1846
1847 /* HW Lock for shared dual port PHYs */
1848 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1849 {
1850         mutex_lock(&bp->port.phy_mutex);
1851
1852         if (bp->port.need_hw_lock)
1853                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1854 }
1855
1856 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1857 {
1858         if (bp->port.need_hw_lock)
1859                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1860
1861         mutex_unlock(&bp->port.phy_mutex);
1862 }
1863
1864 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1865 {
1866         /* The GPIO should be swapped if swap register is set and active */
1867         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1868                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1869         int gpio_shift = gpio_num +
1870                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1871         u32 gpio_mask = (1 << gpio_shift);
1872         u32 gpio_reg;
1873         int value;
1874
1875         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1876                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1877                 return -EINVAL;
1878         }
1879
1880         /* read GPIO value */
1881         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1882
1883         /* get the requested pin value */
1884         if ((gpio_reg & gpio_mask) == gpio_mask)
1885                 value = 1;
1886         else
1887                 value = 0;
1888
1889         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1890
1891         return value;
1892 }
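
/* Worked example of the shift computation above, assuming
 * MISC_REGISTERS_GPIO_PORT_SHIFT is 4: reading GPIO 2 when gpio_port
 * resolves to 1 gives gpio_shift = 2 + 4 = 6, so the pin value is taken
 * from bit 6 of MISC_REG_GPIO.
 */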
1893
1894 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1895 {
1896         /* The GPIO should be swapped if swap register is set and active */
1897         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1898                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1899         int gpio_shift = gpio_num +
1900                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1901         u32 gpio_mask = (1 << gpio_shift);
1902         u32 gpio_reg;
1903
1904         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1905                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1906                 return -EINVAL;
1907         }
1908
1909         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1910         /* read GPIO and mask off everything except the float bits */
1911         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1912
1913         switch (mode) {
1914         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1915                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1916                    gpio_num, gpio_shift);
1917                 /* clear FLOAT and set CLR */
1918                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1920                 break;
1921
1922         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1923                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1924                    gpio_num, gpio_shift);
1925                 /* clear FLOAT and set SET */
1926                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1927                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1928                 break;
1929
1930         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1931                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1932                    gpio_num, gpio_shift);
1933                 /* set FLOAT */
1934                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1935                 break;
1936
1937         default:
1938                 break;
1939         }
1940
1941         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1942         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1943
1944         return 0;
1945 }
1946
1947 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1948 {
1949         /* The GPIO should be swapped if swap register is set and active */
1950         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1951                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1952         int gpio_shift = gpio_num +
1953                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1954         u32 gpio_mask = (1 << gpio_shift);
1955         u32 gpio_reg;
1956
1957         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1958                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1959                 return -EINVAL;
1960         }
1961
1962         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1963         /* read GPIO int */
1964         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1965
1966         switch (mode) {
1967         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1968                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1969                                    "output low\n", gpio_num, gpio_shift);
1970                 /* clear SET and set CLR */
1971                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1972                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1973                 break;
1974
1975         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1976                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1977                                    "output high\n", gpio_num, gpio_shift);
1978                 /* clear CLR and set SET */
1979                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1980                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1981                 break;
1982
1983         default:
1984                 break;
1985         }
1986
1987         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1988         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1989
1990         return 0;
1991 }
1992
1993 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1994 {
1995         u32 spio_mask = (1 << spio_num);
1996         u32 spio_reg;
1997
1998         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1999             (spio_num > MISC_REGISTERS_SPIO_7)) {
2000                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2001                 return -EINVAL;
2002         }
2003
2004         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2005         /* read SPIO and mask off everything except the float bits */
2006         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2007
2008         switch (mode) {
2009         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2010                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2011                 /* clear FLOAT and set CLR */
2012                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2013                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2017                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2018                 /* clear FLOAT and set SET */
2019                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2020                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2021                 break;
2022
2023         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2024                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2025                 /* set FLOAT */
2026                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2027                 break;
2028
2029         default:
2030                 break;
2031         }
2032
2033         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2034         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2035
2036         return 0;
2037 }
2038
2039 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2040 {
2041         switch (bp->link_vars.ieee_fc &
2042                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2043         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2044                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2045                                           ADVERTISED_Pause);
2046                 break;
2047
2048         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2049                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2050                                          ADVERTISED_Pause);
2051                 break;
2052
2053         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2054                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2055                 break;
2056
2057         default:
2058                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2059                                           ADVERTISED_Pause);
2060                 break;
2061         }
2062 }
2063
2064 static void bnx2x_link_report(struct bnx2x *bp)
2065 {
2066         if (bp->link_vars.link_up) {
2067                 if (bp->state == BNX2X_STATE_OPEN)
2068                         netif_carrier_on(bp->dev);
2069                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2070
2071                 printk("%d Mbps ", bp->link_vars.line_speed);
2072
2073                 if (bp->link_vars.duplex == DUPLEX_FULL)
2074                         printk("full duplex");
2075                 else
2076                         printk("half duplex");
2077
2078                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2079                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2080                                 printk(", receive ");
2081                                 if (bp->link_vars.flow_ctrl &
2082                                     BNX2X_FLOW_CTRL_TX)
2083                                         printk("& transmit ");
2084                         } else {
2085                                 printk(", transmit ");
2086                         }
2087                         printk("flow control ON");
2088                 }
2089                 printk("\n");
2090
2091         } else { /* link_down */
2092                 netif_carrier_off(bp->dev);
2093                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2094         }
2095 }
2096
2097 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2098 {
2099         if (!BP_NOMCP(bp)) {
2100                 u8 rc;
2101
2102                 /* Initialize link parameters structure variables */
2103                 /* It is recommended to turn off RX FC for jumbo frames
2104                    for better performance */
2105                 if (IS_E1HMF(bp))
2106                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2107                 else if (bp->dev->mtu > 5000)
2108                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2109                 else
2110                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2111
2112                 bnx2x_acquire_phy_lock(bp);
2113
2114                 if (load_mode == LOAD_DIAG)
2115                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2116
2117                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2118
2119                 bnx2x_release_phy_lock(bp);
2120
2121                 bnx2x_calc_fc_adv(bp);
2122
2123                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2124                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2125                         bnx2x_link_report(bp);
2126                 }
2127
2128                 return rc;
2129         }
2130         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2131         return -EINVAL;
2132 }
2133
2134 static void bnx2x_link_set(struct bnx2x *bp)
2135 {
2136         if (!BP_NOMCP(bp)) {
2137                 bnx2x_acquire_phy_lock(bp);
2138                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2139                 bnx2x_release_phy_lock(bp);
2140
2141                 bnx2x_calc_fc_adv(bp);
2142         } else
2143                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2144 }
2145
2146 static void bnx2x__link_reset(struct bnx2x *bp)
2147 {
2148         if (!BP_NOMCP(bp)) {
2149                 bnx2x_acquire_phy_lock(bp);
2150                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2151                 bnx2x_release_phy_lock(bp);
2152         } else
2153                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2154 }
2155
2156 static u8 bnx2x_link_test(struct bnx2x *bp)
2157 {
2158         u8 rc;
2159
2160         bnx2x_acquire_phy_lock(bp);
2161         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2162         bnx2x_release_phy_lock(bp);
2163
2164         return rc;
2165 }
2166
2167 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2168 {
2169         u32 r_param = bp->link_vars.line_speed / 8;
2170         u32 fair_periodic_timeout_usec;
2171         u32 t_fair;
2172
2173         memset(&(bp->cmng.rs_vars), 0,
2174                sizeof(struct rate_shaping_vars_per_port));
2175         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2176
2177         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2178         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2179
2180         /* this is the threshold below which no timer arming will occur.
2181            The 1.25 coefficient makes the threshold a little bigger than
2182            the real time, to compensate for timer inaccuracy */
2183         bp->cmng.rs_vars.rs_threshold =
2184                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2185
2186         /* resolution of fairness timer */
2187         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2188         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2189         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2190
2191         /* this is the threshold below which we won't arm the timer anymore */
2192         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2193
2194         /* we multiply by 1e3/8 to get bytes/msec.
2195            We don't want the credits to exceed
2196            t_fair*FAIR_MEM (the algorithm resolution) */
2197         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2198         /* since each tick is 4 usec */
2199         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2200 }
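
/* Worked example for a 10G link, using the figures already quoted in
 * the comments above (RS_PERIODIC_TIMEOUT_USEC = 100, t_fair = 1000 usec
 * at 10G):
 *
 *   r_param             = 10000 / 8          = 1250 bytes/usec
 *   rs_threshold        = 100 * 1250 * 5 / 4 = 156250 bytes
 *   rs_periodic_timeout = 100 / 4            = 25 SDM ticks
 */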
2201
2202 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2203 {
2204         struct rate_shaping_vars_per_vn m_rs_vn;
2205         struct fairness_vars_per_vn m_fair_vn;
2206         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2207         u16 vn_min_rate, vn_max_rate;
2208         int i;
2209
2210         /* If the function is hidden - set min and max rates to zero */
2211         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2212                 vn_min_rate = 0;
2213                 vn_max_rate = 0;
2214
2215         } else {
2216                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2217                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2218                 /* If fairness is enabled (not all min rates are zero) and
2219                    the current min rate is zero - set it to 1.
2220                    This is a requirement of the algorithm. */
2221                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2222                         vn_min_rate = DEF_MIN_RATE;
2223                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2224                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2225         }
2226
2227         DP(NETIF_MSG_IFUP,
2228            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2229            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2230
2231         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2232         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2233
2234         /* global vn counter - maximal Mbps for this vn */
2235         m_rs_vn.vn_counter.rate = vn_max_rate;
2236
2237         /* quota - number of bytes transmitted in this period */
2238         m_rs_vn.vn_counter.quota =
2239                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2240
2241         if (bp->vn_weight_sum) {
2242                 /* credit for each period of the fairness algorithm:
2243                    number of bytes in T_FAIR (the vn's share of the port rate).
2244                    vn_weight_sum should not be larger than 10000, thus
2245                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2246                    than zero */
2247                 m_fair_vn.vn_credit_delta =
2248                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2249                                                  (8 * bp->vn_weight_sum))),
2250                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2251                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2252                    m_fair_vn.vn_credit_delta);
2253         }
2254
2255         /* Store it to internal memory */
2256         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2257                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2258                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2259                        ((u32 *)(&m_rs_vn))[i]);
2260
2261         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2262                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2263                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2264                        ((u32 *)(&m_fair_vn))[i]);
2265 }
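
/* Illustrative decoding of the MF config fields used above: the MIN/MAX
 * BW fields are scaled by 100, so a MIN_BW field of 10 yields
 * vn_min_rate = 1000 (Mbps).  When fairness is active and the field is
 * zero, the rate is bumped to DEF_MIN_RATE instead.
 */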
2266
2267
2268 /* This function is called upon link interrupt */
2269 static void bnx2x_link_attn(struct bnx2x *bp)
2270 {
2271         /* Make sure that we are synced with the current statistics */
2272         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2273
2274         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2275
2276         if (bp->link_vars.link_up) {
2277
2278                 /* dropless flow control */
2279                 if (CHIP_IS_E1H(bp)) {
2280                         int port = BP_PORT(bp);
2281                         u32 pause_enabled = 0;
2282
2283                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2284                                 pause_enabled = 1;
2285
2286                         REG_WR(bp, BAR_USTRORM_INTMEM +
2287                                USTORM_PAUSE_ENABLED_OFFSET(port),
2288                                pause_enabled);
2289                 }
2290
2291                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2292                         struct host_port_stats *pstats;
2293
2294                         pstats = bnx2x_sp(bp, port_stats);
2295                         /* reset old bmac stats */
2296                         memset(&(pstats->mac_stx[0]), 0,
2297                                sizeof(struct mac_stx));
2298                 }
2299                 if ((bp->state == BNX2X_STATE_OPEN) ||
2300                     (bp->state == BNX2X_STATE_DISABLED))
2301                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2302         }
2303
2304         /* indicate link status */
2305         bnx2x_link_report(bp);
2306
2307         if (IS_E1HMF(bp)) {
2308                 int port = BP_PORT(bp);
2309                 int func;
2310                 int vn;
2311
2312                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2313                         if (vn == BP_E1HVN(bp))
2314                                 continue;
2315
2316                         func = ((vn << 1) | port);
2317
2318                         /* Set the attention towards other drivers
2319                            on the same port */
2320                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2321                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2322                 }
2323
2324                 if (bp->link_vars.link_up) {
2325                         int i;
2326
2327                         /* Init rate shaping and fairness contexts */
2328                         bnx2x_init_port_minmax(bp);
2329
2330                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2331                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2332
2333                         /* Store it to internal memory */
2334                         for (i = 0;
2335                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2336                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2337                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2338                                        ((u32 *)(&bp->cmng))[i]);
2339                 }
2340         }
2341 }
2342
2343 static void bnx2x__link_status_update(struct bnx2x *bp)
2344 {
2345         if (bp->state != BNX2X_STATE_OPEN)
2346                 return;
2347
2348         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2349
2350         if (bp->link_vars.link_up)
2351                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2352         else
2353                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2354
2355         /* indicate link status */
2356         bnx2x_link_report(bp);
2357 }
2358
2359 static void bnx2x_pmf_update(struct bnx2x *bp)
2360 {
2361         int port = BP_PORT(bp);
2362         u32 val;
2363
2364         bp->port.pmf = 1;
2365         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2366
2367         /* enable nig attention */
2368         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2369         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2370         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2371
2372         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2373 }
2374
2375 /* end of Link */
2376
2377 /* slow path */
2378
2379 /*
2380  * General service functions
2381  */
2382
2383 /* the slow path queue is odd since completions arrive on the fastpath ring */
2384 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2385                          u32 data_hi, u32 data_lo, int common)
2386 {
2387         int func = BP_FUNC(bp);
2388
2389         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2390            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2391            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2392            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2393            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2394
2395 #ifdef BNX2X_STOP_ON_ERROR
2396         if (unlikely(bp->panic))
2397                 return -EIO;
2398 #endif
2399
2400         spin_lock_bh(&bp->spq_lock);
2401
2402         if (!bp->spq_left) {
2403                 BNX2X_ERR("BUG! SPQ ring full!\n");
2404                 spin_unlock_bh(&bp->spq_lock);
2405                 bnx2x_panic();
2406                 return -EBUSY;
2407         }
2408
2409         /* CID needs the port number to be encoded in it */
2410         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2411                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2412                                      HW_CID(bp, cid)));
2413         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2414         if (common)
2415                 bp->spq_prod_bd->hdr.type |=
2416                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2417
2418         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2419         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2420
2421         bp->spq_left--;
2422
2423         if (bp->spq_prod_bd == bp->spq_last_bd) {
2424                 bp->spq_prod_bd = bp->spq;
2425                 bp->spq_prod_idx = 0;
2426                 DP(NETIF_MSG_TIMER, "end of spq\n");
2427
2428         } else {
2429                 bp->spq_prod_bd++;
2430                 bp->spq_prod_idx++;
2431         }
2432
2433         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2434                bp->spq_prod_idx);
2435
2436         spin_unlock_bh(&bp->spq_lock);
2437         return 0;
2438 }
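
/* Usage sketch (command and arguments are illustrative): a caller
 * posting a ramrod that carries no data would do something like
 *
 *      rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
 *
 * and then wait for the completion to arrive on the fastpath ring.
 */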
2439
2440 /* acquire split MCP access lock register */
2441 static int bnx2x_acquire_alr(struct bnx2x *bp)
2442 {
2443         u32 i, j, val;
2444         int rc = 0;
2445
2446         might_sleep();
2447         i = 100;
2448         for (j = 0; j < i*10; j++) {
2449                 val = (1UL << 31);
2450                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2451                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2452                 if (val & (1L << 31))
2453                         break;
2454
2455                 msleep(5);
2456         }
2457         if (!(val & (1L << 31))) {
2458                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2459                 rc = -EBUSY;
2460         }
2461
2462         return rc;
2463 }
2464
2465 /* release split MCP access lock register */
2466 static void bnx2x_release_alr(struct bnx2x *bp)
2467 {
2468         u32 val = 0;
2469
2470         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2471 }
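
/* The ALR is a single bit (bit 31) of the GRCBASE_MCP + 0x9c register:
 * acquiring writes the bit and polls until it reads back set (up to
 * ~5 seconds in 5 ms steps), while releasing simply clears the register.
 */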
2472
2473 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2474 {
2475         struct host_def_status_block *def_sb = bp->def_status_blk;
2476         u16 rc = 0;
2477
2478         barrier(); /* status block is written to by the chip */
2479         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2480                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2481                 rc |= 1;
2482         }
2483         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2484                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2485                 rc |= 2;
2486         }
2487         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2488                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2489                 rc |= 4;
2490         }
2491         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2492                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2493                 rc |= 8;
2494         }
2495         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2496                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2497                 rc |= 16;
2498         }
2499         return rc;
2500 }
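
/* The return value is a bitmask of which indices advanced: 1 - attention
 * bits, 2 - cstorm, 4 - ustorm, 8 - xstorm, 16 - tstorm.  A zero return
 * means the default status block did not change.
 */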
2501
2502 /*
2503  * slow path service functions
2504  */
2505
2506 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2507 {
2508         int port = BP_PORT(bp);
2509         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2510                        COMMAND_REG_ATTN_BITS_SET);
2511         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2512                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2513         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2514                                        NIG_REG_MASK_INTERRUPT_PORT0;
2515         u32 aeu_mask;
2516         u32 nig_mask = 0;
2517
2518         if (bp->attn_state & asserted)
2519                 BNX2X_ERR("IGU ERROR\n");
2520
2521         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2522         aeu_mask = REG_RD(bp, aeu_addr);
2523
2524         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2525            aeu_mask, asserted);
2526         aeu_mask &= ~(asserted & 0xff);
2527         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2528
2529         REG_WR(bp, aeu_addr, aeu_mask);
2530         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2531
2532         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2533         bp->attn_state |= asserted;
2534         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2535
2536         if (asserted & ATTN_HARD_WIRED_MASK) {
2537                 if (asserted & ATTN_NIG_FOR_FUNC) {
2538
2539                         bnx2x_acquire_phy_lock(bp);
2540
2541                         /* save nig interrupt mask */
2542                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2543                         REG_WR(bp, nig_int_mask_addr, 0);
2544
2545                         bnx2x_link_attn(bp);
2546
2547                         /* handle unicore attn? */
2548                 }
2549                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2550                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2551
2552                 if (asserted & GPIO_2_FUNC)
2553                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2554
2555                 if (asserted & GPIO_3_FUNC)
2556                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2557
2558                 if (asserted & GPIO_4_FUNC)
2559                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2560
2561                 if (port == 0) {
2562                         if (asserted & ATTN_GENERAL_ATTN_1) {
2563                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2564                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2565                         }
2566                         if (asserted & ATTN_GENERAL_ATTN_2) {
2567                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2568                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2569                         }
2570                         if (asserted & ATTN_GENERAL_ATTN_3) {
2571                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2572                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2573                         }
2574                 } else {
2575                         if (asserted & ATTN_GENERAL_ATTN_4) {
2576                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2577                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2578                         }
2579                         if (asserted & ATTN_GENERAL_ATTN_5) {
2580                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2581                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2582                         }
2583                         if (asserted & ATTN_GENERAL_ATTN_6) {
2584                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2585                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2586                         }
2587                 }
2588
2589         } /* if hardwired */
2590
2591         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2592            asserted, hc_addr);
2593         REG_WR(bp, hc_addr, asserted);
2594
2595         /* now set back the mask */
2596         if (asserted & ATTN_NIG_FOR_FUNC) {
2597                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2598                 bnx2x_release_phy_lock(bp);
2599         }
2600 }
2601
2602 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2603 {
2604         int port = BP_PORT(bp);
2605         int reg_offset;
2606         u32 val;
2607
2608         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2609                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2610
2611         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2612
2613                 val = REG_RD(bp, reg_offset);
2614                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2615                 REG_WR(bp, reg_offset, val);
2616
2617                 BNX2X_ERR("SPIO5 hw attention\n");
2618
2619                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2620                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2621                         /* Fan failure attention */
2622
2623                         /* The PHY reset is controlled by GPIO 1 */
2624                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2625                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2626                         /* Low power mode is controlled by GPIO 2 */
2627                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2628                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2629                         /* mark the failure */
2630                         bp->link_params.ext_phy_config &=
2631                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2632                         bp->link_params.ext_phy_config |=
2633                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2634                         SHMEM_WR(bp,
2635                                  dev_info.port_hw_config[port].
2636                                                         external_phy_config,
2637                                  bp->link_params.ext_phy_config);
2638                         /* log the failure */
2639                         printk(KERN_ERR PFX "Fan Failure on Network"
2640                                " Controller %s has caused the driver to"
2641                                " shutdown the card to prevent permanent"
2642                                " damage.  Please contact Dell Support for"
2643                                " assistance\n", bp->dev->name);
2644                         break;
2645
2646                 default:
2647                         break;
2648                 }
2649         }
2650
2651         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2652                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2653                 bnx2x_acquire_phy_lock(bp);
2654                 bnx2x_handle_module_detect_int(&bp->link_params);
2655                 bnx2x_release_phy_lock(bp);
2656         }
2657
2658         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2659
2660                 val = REG_RD(bp, reg_offset);
2661                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2662                 REG_WR(bp, reg_offset, val);
2663
2664                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2665                           (attn & HW_INTERRUT_ASSERT_SET_0));
2666                 bnx2x_panic();
2667         }
2668 }
2669
2670 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2671 {
2672         u32 val;
2673
2674         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2675
2676                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2677                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2678                 /* DORQ discard attention */
2679                 if (val & 0x2)
2680                         BNX2X_ERR("FATAL error from DORQ\n");
2681         }
2682
2683         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2684
2685                 int port = BP_PORT(bp);
2686                 int reg_offset;
2687
2688                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2689                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2690
2691                 val = REG_RD(bp, reg_offset);
2692                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2693                 REG_WR(bp, reg_offset, val);
2694
2695                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2696                           (attn & HW_INTERRUT_ASSERT_SET_1));
2697                 bnx2x_panic();
2698         }
2699 }
2700
2701 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2702 {
2703         u32 val;
2704
2705         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2706
2707                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2708                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2709                 /* CFC error attention */
2710                 if (val & 0x2)
2711                         BNX2X_ERR("FATAL error from CFC\n");
2712         }
2713
2714         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2715
2716                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2717                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2718                 /* RQ_USDMDP_FIFO_OVERFLOW */
2719                 if (val & 0x18000)
2720                         BNX2X_ERR("FATAL error from PXP\n");
2721         }
2722
2723         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2724
2725                 int port = BP_PORT(bp);
2726                 int reg_offset;
2727
2728                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2729                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2730
2731                 val = REG_RD(bp, reg_offset);
2732                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2733                 REG_WR(bp, reg_offset, val);
2734
2735                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2736                           (attn & HW_INTERRUT_ASSERT_SET_2));
2737                 bnx2x_panic();
2738         }
2739 }
2740
2741 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2742 {
2743         u32 val;
2744
2745         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2746
2747                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2748                         int func = BP_FUNC(bp);
2749
2750                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2751                         bnx2x__link_status_update(bp);
2752                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2753                                                         DRV_STATUS_PMF)
2754                                 bnx2x_pmf_update(bp);
2755
2756                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2757
2758                         BNX2X_ERR("MC assert!\n");
2759                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2760                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2761                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2762                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2763                         bnx2x_panic();
2764
2765                 } else if (attn & BNX2X_MCP_ASSERT) {
2766
2767                         BNX2X_ERR("MCP assert!\n");
2768                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2769                         bnx2x_fw_dump(bp);
2770
2771                 } else
2772                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2773         }
2774
2775         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2776                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2777                 if (attn & BNX2X_GRC_TIMEOUT) {
2778                         val = CHIP_IS_E1H(bp) ?
2779                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2780                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2781                 }
2782                 if (attn & BNX2X_GRC_RSV) {
2783                         val = CHIP_IS_E1H(bp) ?
2784                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2785                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2786                 }
2787                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2788         }
2789 }
2790
2791 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2792 {
2793         struct attn_route attn;
2794         struct attn_route group_mask;
2795         int port = BP_PORT(bp);
2796         int index;
2797         u32 reg_addr;
2798         u32 val;
2799         u32 aeu_mask;
2800
2801         /* need to take HW lock because MCP or other port might also
2802            try to handle this event */
2803         bnx2x_acquire_alr(bp);
2804
2805         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2806         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2807         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2808         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2809         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2810            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2811
2812         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2813                 if (deasserted & (1 << index)) {
2814                         group_mask = bp->attn_group[index];
2815
2816                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2817                            index, group_mask.sig[0], group_mask.sig[1],
2818                            group_mask.sig[2], group_mask.sig[3]);
2819
2820                         bnx2x_attn_int_deasserted3(bp,
2821                                         attn.sig[3] & group_mask.sig[3]);
2822                         bnx2x_attn_int_deasserted1(bp,
2823                                         attn.sig[1] & group_mask.sig[1]);
2824                         bnx2x_attn_int_deasserted2(bp,
2825                                         attn.sig[2] & group_mask.sig[2]);
2826                         bnx2x_attn_int_deasserted0(bp,
2827                                         attn.sig[0] & group_mask.sig[0]);
2828
2829                         if ((attn.sig[0] & group_mask.sig[0] &
2830                                                 HW_PRTY_ASSERT_SET_0) ||
2831                             (attn.sig[1] & group_mask.sig[1] &
2832                                                 HW_PRTY_ASSERT_SET_1) ||
2833                             (attn.sig[2] & group_mask.sig[2] &
2834                                                 HW_PRTY_ASSERT_SET_2))
2835                                 BNX2X_ERR("FATAL HW block parity attention\n");
2836                 }
2837         }
2838
2839         bnx2x_release_alr(bp);
2840
2841         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2842
2843         val = ~deasserted;
2844         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2845            val, reg_addr);
2846         REG_WR(bp, reg_addr, val);
2847
2848         if (~bp->attn_state & deasserted)
2849                 BNX2X_ERR("IGU ERROR\n");
2850
2851         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2852                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2853
2854         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855         aeu_mask = REG_RD(bp, reg_addr);
2856
2857         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2858            aeu_mask, deasserted);
2859         aeu_mask |= (deasserted & 0xff);
2860         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2861
2862         REG_WR(bp, reg_addr, aeu_mask);
2863         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2864
2865         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2866         bp->attn_state &= ~deasserted;
2867         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2868 }
2869
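/*
 * Editor's note (summary, not part of the original source): for each
 * deasserted group, the handler above ANDs the four "after invert"
 * AEU signal words against that group's mask and dispatches the
 * surviving bits to the per-word handlers.  It then clears the bits
 * in the HC attention register and re-enables them in the per-port
 * AEU mask - the latter under its own HW lock, since the MCP shares
 * that register.
 */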
2870 static void bnx2x_attn_int(struct bnx2x *bp)
2871 {
2872         /* read local copy of bits */
2873         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2874                                                                 attn_bits);
2875         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2876                                                                 attn_bits_ack);
2877         u32 attn_state = bp->attn_state;
2878
2879         /* look for changed bits */
2880         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2881         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2882
2883         DP(NETIF_MSG_HW,
2884            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2885            attn_bits, attn_ack, asserted, deasserted);
2886
2887         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2888                 BNX2X_ERR("BAD attention state\n");
2889
2890         /* handle bits that were raised */
2891         if (asserted)
2892                 bnx2x_attn_int_asserted(bp, asserted);
2893
2894         if (deasserted)
2895                 bnx2x_attn_int_deasserted(bp, deasserted);
2896 }
2897
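/*
 * Editor's note (worked example, not part of the original source):
 * the asserted/deasserted masks in bnx2x_attn_int() are pure bit
 * algebra over three snapshots.  A bit is newly asserted only if the
 * HW reports it and it is neither acked nor already tracked; newly
 * deasserted is the mirror case.  E.g. with attn_bits = 0110b and
 * attn_ack = attn_state = 0011b:
 *
 *   asserted   =  0110 & ~0011 & ~0011 = 0100b  (bit 2 newly raised)
 *   deasserted = ~0110 &  0011 &  0011 = 0001b  (bit 0 newly dropped)
 *
 * The "BAD attention state" check fires when attn_bits agrees with
 * attn_ack on a bit while disagreeing with attn_state, i.e. an ack
 * exists for a transition the driver never recorded.
 */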
2898 static void bnx2x_sp_task(struct work_struct *work)
2899 {
2900         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2901         u16 status;
2902
2904         /* Return here if interrupt is disabled */
2905         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2907                 return;
2908         }
2909
2910         status = bnx2x_update_dsb_idx(bp);
2911 /*      if (status == 0)                                     */
2912 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2913
2914         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2915
2916         /* HW attentions */
2917         if (status & 0x1)
2918                 bnx2x_attn_int(bp);
2919
2920         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2921                      IGU_INT_NOP, 1);
2922         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2923                      IGU_INT_NOP, 1);
2924         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2925                      IGU_INT_NOP, 1);
2926         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2927                      IGU_INT_NOP, 1);
2928         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2929                      IGU_INT_ENABLE, 1);
2930
2931 }
2932
2933 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2934 {
2935         struct net_device *dev = dev_instance;
2936         struct bnx2x *bp = netdev_priv(dev);
2937
2938         /* Return here if interrupt is disabled */
2939         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2940                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2941                 return IRQ_HANDLED;
2942         }
2943
2944         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2945
2946 #ifdef BNX2X_STOP_ON_ERROR
2947         if (unlikely(bp->panic))
2948                 return IRQ_HANDLED;
2949 #endif
2950
2951         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2952
2953         return IRQ_HANDLED;
2954 }
2955
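/*
 * Editor's note (flow sketch, not part of the original source): the
 * MSI-X slow-path ISR above does the minimum in hard-IRQ context - it
 * masks further default-SB interrupts with the IGU_INT_DISABLE ack
 * and defers the real work to bnx2x_sp_task() on the driver
 * workqueue, whose final IGU_INT_ENABLE ack re-arms the line.
 */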
2956 /* end of slow path */
2957
2958 /* Statistics */
2959
2960 /****************************************************************************
2961 * Macros
2962 ****************************************************************************/
2963
2964 /* sum[hi:lo] += add[hi:lo] */
2965 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2966         do { \
2967                 s_lo += a_lo; \
2968                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2969         } while (0)
2970
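/*
 * Editor's note (worked example, not part of the original source):
 * ADD_64 is a 64-bit add over split u32 halves.  Unsigned wraparound
 * detects the carry: after s_lo += a_lo, (s_lo < a_lo) holds iff the
 * low half overflowed.  E.g. 0x00000001:ffffffff + 0x00000000:00000001:
 *
 *   s_lo = 0xffffffff + 1 = 0x00000000   (wraps, so s_lo < a_lo)
 *   s_hi = 0x1 + 0x0 + 1  = 0x2          => 0x00000002:00000000
 */
#if 0	/* hedged usage sketch only - never compiled */
	u32 sum_hi = 0x1, sum_lo = 0xffffffff;

	ADD_64(sum_hi, 0x0, sum_lo, 0x1);
	/* now sum_hi == 0x2 and sum_lo == 0x0, as worked out above */
#endif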
2971 /* difference = minuend - subtrahend */
2972 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2973         do { \
2974                 if (m_lo < s_lo) { \
2975                         /* underflow */ \
2976                         d_hi = m_hi - s_hi; \
2977                         if (d_hi > 0) { \
2978                                 /* we can borrow 1 */ \
2979                                 d_hi--; \
2980                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2981                         } else { \
2982                                 /* m_hi <= s_hi */ \
2983                                 d_hi = 0; \
2984                                 d_lo = 0; \
2985                         } \
2986                 } else { \
2987                         /* m_lo >= s_lo */ \
2988                         if (m_hi < s_hi) { \
2989                                 d_hi = 0; \
2990                                 d_lo = 0; \
2991                         } else { \
2992                                 /* m_hi >= s_hi */ \
2993                                 d_hi = m_hi - s_hi; \
2994                                 d_lo = m_lo - s_lo; \
2995                         } \
2996                 } \
2997         } while (0)
2998
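/*
 * Editor's note (worked example, not part of the original source):
 * DIFF_64 subtracts with an explicit borrow and clamps the result to
 * zero when the subtrahend is larger - the counters are assumed
 * monotonic, so a negative delta is treated as "no change".  E.g.
 * 0x00000001:00000000 - 0x00000000:00000001:
 *
 *   m_lo(0) < s_lo(1) and d_hi = 1 - 0 > 0, so borrow from d_hi:
 *   d_hi = 0, d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff
 *   => 0x00000000:ffffffff, i.e. 2^32 - 1.
 */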
2999 #define UPDATE_STAT64(s, t) \
3000         do { \
3001                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3002                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3003                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3004                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3005                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3006                        pstats->mac_stx[1].t##_lo, diff.lo); \
3007         } while (0)
3008
3009 #define UPDATE_STAT64_NIG(s, t) \
3010         do { \
3011                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3012                         diff.lo, new->s##_lo, old->s##_lo); \
3013                 ADD_64(estats->t##_hi, diff.hi, \
3014                        estats->t##_lo, diff.lo); \
3015         } while (0)
3016
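/*
 * Editor's note (reading of the two macros above, not part of the
 * original source): UPDATE_STAT64 keeps two copies per MAC counter -
 * mac_stx[0] is the last raw HW snapshot and mac_stx[1] the running
 * 64-bit total.  Each call folds the snapshot delta into mac_stx[1];
 * since DIFF_64 clamps negative deltas to zero, a HW counter reset
 * contributes nothing instead of a bogus jump.  UPDATE_STAT64_NIG is
 * the same pattern with "old" as the snapshot and estats as the
 * accumulator.
 */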
3017 /* sum[hi:lo] += add */
3018 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3019         do { \
3020                 s_lo += a; \
3021                 s_hi += (s_lo < a) ? 1 : 0; \
3022         } while (0)
3023
3024 #define UPDATE_EXTEND_STAT(s) \
3025         do { \
3026                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3027                               pstats->mac_stx[1].s##_lo, \
3028                               new->s); \
3029         } while (0)
3030
3031 #define UPDATE_EXTEND_TSTAT(s, t) \
3032         do { \
3033                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3034                 old_tclient->s = tclient->s; \
3035                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3036         } while (0)
3037
3038 #define UPDATE_EXTEND_USTAT(s, t) \
3039         do { \
3040                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3041                 old_uclient->s = uclient->s; \
3042                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3043         } while (0)
3044
3045 #define UPDATE_EXTEND_XSTAT(s, t) \
3046         do { \
3047                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3048                 old_xclient->s = xclient->s; \
3049                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3050         } while (0)
3051
3052 /* minuend -= subtrahend */
3053 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3054         do { \
3055                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3056         } while (0)
3057
3058 /* minuend[hi:lo] -= subtrahend */
3059 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3060         do { \
3061                 SUB_64(m_hi, 0, m_lo, s); \
3062         } while (0)
3063
3064 #define SUB_EXTEND_USTAT(s, t) \
3065         do { \
3066                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3067                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3068         } while (0)
3069
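/*
 * Editor's note (summary of the *_EXTEND_* helpers above, not part of
 * the original source): the storm counters are 32-bit little-endian,
 * so UPDATE_EXTEND_{T,U,X}STAT computes "new - old" in plain u32
 * arithmetic - still correct across a single 32-bit wrap of the FW
 * counter - and widens the delta into a 64-bit hi:lo accumulator via
 * ADD_EXTEND_64.  SUB_EXTEND_USTAT is the subtracting twin, used
 * below to back out no-buffer drops that were first counted as
 * received packets.
 */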
3070 /*
3071  * General service functions
3072  */
3073
3074 static inline long bnx2x_hilo(u32 *hiref)
3075 {
3076         u32 lo = *(hiref + 1);
3077 #if (BITS_PER_LONG == 64)
3078         u32 hi = *hiref;
3079
3080         return HILO_U64(hi, lo);
3081 #else
3082         return lo;
3083 #endif
3084 }
3085
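/*
 * Editor's note (not part of the original source): bnx2x_hilo() feeds
 * struct net_device_stats, whose fields are unsigned long.  On 64-bit
 * builds it returns the full hi:lo value; on 32-bit builds a long can
 * only hold the low word, so the high word is deliberately dropped.
 */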
3086 /*
3087  * Init service functions
3088  */
3089
3090 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3091 {
3092         if (!bp->stats_pending) {
3093                 struct eth_query_ramrod_data ramrod_data = {0};
3094                 int i, rc;
3095
3096                 ramrod_data.drv_counter = bp->stats_counter++;
3097                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3098                 for_each_queue(bp, i)
3099                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3100
3101                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3102                                    ((u32 *)&ramrod_data)[1],
3103                                    ((u32 *)&ramrod_data)[0], 0);
3104                 if (rc == 0) {
3105                         /* stats ramrod has its own slot on the spq */
3106                         bp->spq_left++;
3107                         bp->stats_pending = 1;
3108                 }
3109         }
3110 }
3111
3112 static void bnx2x_stats_init(struct bnx2x *bp)
3113 {
3114         int port = BP_PORT(bp);
3115         int i;
3116
3117         bp->stats_pending = 0;
3118         bp->executer_idx = 0;
3119         bp->stats_counter = 0;
3120
3121         /* port stats */
3122         if (!BP_NOMCP(bp))
3123                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3124         else
3125                 bp->port.port_stx = 0;
3126         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3127
3128         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3129         bp->port.old_nig_stats.brb_discard =
3130                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3131         bp->port.old_nig_stats.brb_truncate =
3132                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3133         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3134                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3135         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3136                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3137
3138         /* function stats */
3139         for_each_queue(bp, i) {
3140                 struct bnx2x_fastpath *fp = &bp->fp[i];
3141
3142                 memset(&fp->old_tclient, 0,
3143                        sizeof(struct tstorm_per_client_stats));
3144                 memset(&fp->old_uclient, 0,
3145                        sizeof(struct ustorm_per_client_stats));
3146                 memset(&fp->old_xclient, 0,
3147                        sizeof(struct xstorm_per_client_stats));
3148                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3149         }
3150
3151         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3152         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3153
3154         bp->stats_state = STATS_STATE_DISABLED;
3155         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3156                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3157 }
3158
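/*
 * Editor's note (not part of the original source): note the asymmetry
 * above - the NIG "old" snapshot is seeded from the live BRB
 * discard/truncate and egress counters rather than zeroed, so the
 * first UPDATE_STAT64_NIG delta covers only traffic seen from this
 * point on, while the per-queue storm snapshots and accumulators all
 * start from zero.  Statistics stay in STATS_STATE_DISABLED until an
 * event (e.g. the PMF event issued on the last line) starts them.
 */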
3159 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3160 {
3161         struct dmae_command *dmae = &bp->stats_dmae;
3162         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3163
3164         *stats_comp = DMAE_COMP_VAL;
3165         if (CHIP_REV_IS_SLOW(bp))
3166                 return;
3167
3168         /* loader */
3169         if (bp->executer_idx) {
3170                 int loader_idx = PMF_DMAE_C(bp);
3171
3172                 memset(dmae, 0, sizeof(struct dmae_command));
3173
3174                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3175                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3176                                 DMAE_CMD_DST_RESET |
3177 #ifdef __BIG_ENDIAN
3178                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3179 #else
3180                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3181 #endif
3182                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3183                                                DMAE_CMD_PORT_0) |
3184                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3185                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3186                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3187                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3188                                      sizeof(struct dmae_command) *
3189                                      (loader_idx + 1)) >> 2;
3190                 dmae->dst_addr_hi = 0;
3191                 dmae->len = sizeof(struct dmae_command) >> 2;
3192                 if (CHIP_IS_E1(bp))
3193                         dmae->len--;
3194                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3195                 dmae->comp_addr_hi = 0;
3196                 dmae->comp_val = 1;
3197
3198                 *stats_comp = 0;
3199                 bnx2x_post_dmae(bp, dmae, loader_idx);
3200
3201         } else if (bp->func_stx) {
3202                 *stats_comp = 0;
3203                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3204         }
3205 }
3206
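/*
 * Editor's note (mechanism sketch, inferred from the code above, not
 * part of the original source): the "loader" chains the queued
 * commands without CPU help.  Its opcode sets DMAE_CMD_DST_RESET but
 * deliberately omits DMAE_CMD_SRC_RESET, so each time channel
 * loader_idx is re-triggered it copies the *next* struct dmae_command
 * from the host queue (dmae[0], dmae[1], ...) into the command memory
 * of channel loader_idx + 1 and kicks it via the completion write to
 * dmae_reg_go_c[loader_idx + 1].  Each queued command in turn
 * completes back to dmae_reg_go_c[loader_idx], re-arming the loader,
 * until the last one completes to stats_comp with DMAE_COMP_VAL.
 */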
3207 static int bnx2x_stats_comp(struct bnx2x *bp)
3208 {
3209         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3210         int cnt = 10;
3211
3212         might_sleep();
3213         while (*stats_comp != DMAE_COMP_VAL) {
3214                 if (!cnt) {
3215                         BNX2X_ERR("timed out waiting for stats to finish\n");
3216                         break;
3217                 }
3218                 cnt--;
3219                 msleep(1);
3220         }
3221         return 1;
3222 }
3223
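/*
 * Editor's note (not part of the original source): bnx2x_stats_comp()
 * polls stats_comp for DMAE_COMP_VAL, sleeping at least 1 ms per try
 * for up to 10 tries.  A timeout is only logged - the function always
 * returns 1, so callers proceed either way.
 */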
3224 /*
3225  * Statistics service functions
3226  */
3227
3228 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3229 {
3230         struct dmae_command *dmae;
3231         u32 opcode;
3232         int loader_idx = PMF_DMAE_C(bp);
3233         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3234
3235         /* sanity */
3236         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3237                 BNX2X_ERR("BUG!\n");
3238                 return;
3239         }
3240
3241         bp->executer_idx = 0;
3242
3243         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3244                   DMAE_CMD_C_ENABLE |
3245                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3246 #ifdef __BIG_ENDIAN
3247                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3248 #else
3249                   DMAE_CMD_ENDIANITY_DW_SWAP |
3250 #endif
3251                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3252                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3253
3254         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3255         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3256         dmae->src_addr_lo = bp->port.port_stx >> 2;
3257         dmae->src_addr_hi = 0;
3258         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3259         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3260         dmae->len = DMAE_LEN32_RD_MAX;
3261         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3262         dmae->comp_addr_hi = 0;
3263         dmae->comp_val = 1;
3264
3265         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3267         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3268         dmae->src_addr_hi = 0;
3269         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3270                                    DMAE_LEN32_RD_MAX * 4);
3271         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3272                                    DMAE_LEN32_RD_MAX * 4);
3273         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3274         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3275         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3276         dmae->comp_val = DMAE_COMP_VAL;
3277
3278         *stats_comp = 0;
3279         bnx2x_hw_stats_post(bp);
3280         bnx2x_stats_comp(bp);
3281 }
3282
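/*
 * Editor's note (not part of the original source): a single DMAE read
 * is capped at DMAE_LEN32_RD_MAX dwords, so the port-stats snapshot
 * above is pulled from shmem in two commands - a maximum-size chunk,
 * then the remainder offset by DMAE_LEN32_RD_MAX dwords (i.e.
 * DMAE_LEN32_RD_MAX * 4 bytes on the PCI side).  Only the second
 * command completes to stats_comp, which bnx2x_stats_comp() then
 * waits on.
 */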
3283 static void bnx2x_port_stats_init(struct bnx2x *bp)
3284 {
3285         struct dmae_command *dmae;
3286         int port = BP_PORT(bp);
3287         int vn = BP_E1HVN(bp);
3288         u32 opcode;
3289         int loader_idx = PMF_DMAE_C(bp);
3290         u32 mac_addr;
3291         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3292
3293         /* sanity */
3294         if (!bp->link_vars.link_up || !bp->port.pmf) {
3295                 BNX2X_ERR("BUG!\n");
3296                 return;
3297         }
3298
3299         bp->executer_idx = 0;
3300
3301         /* MCP */
3302         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3303                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3304                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3305 #ifdef __BIG_ENDIAN
3306                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3307 #else
3308                   DMAE_CMD_ENDIANITY_DW_SWAP |
3309 #endif
3310                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3311                   (vn << DMAE_CMD_E1HVN_SHIFT));
3312
3313         if (bp->port.port_stx) {
3314
3315                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3316                 dmae->opcode = opcode;
3317                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3318                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3319                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3320                 dmae->dst_addr_hi = 0;
3321                 dmae->len = sizeof(struct host_port_stats) >> 2;
3322                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323                 dmae->comp_addr_hi = 0;
3324                 dmae->comp_val = 1;
3325         }
3326
3327         if (bp->func_stx) {
3328
3329                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330                 dmae->opcode = opcode;
3331                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3332                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3333                 dmae->dst_addr_lo = bp->func_stx >> 2;
3334                 dmae->dst_addr_hi = 0;
3335                 dmae->len = sizeof(struct host_func_stats) >> 2;
3336                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337                 dmae->comp_addr_hi = 0;
3338                 dmae->comp_val = 1;
3339         }
3340
3341         /* MAC */
3342         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3343                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3344                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3345 #ifdef __BIG_ENDIAN
3346                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3347 #else
3348                   DMAE_CMD_ENDIANITY_DW_SWAP |
3349 #endif
3350                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3351                   (vn << DMAE_CMD_E1HVN_SHIFT));
3352
3353         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3354
3355                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3356                                    NIG_REG_INGRESS_BMAC0_MEM);
3357
3358                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3359                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3360                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361                 dmae->opcode = opcode;
3362                 dmae->src_addr_lo = (mac_addr +
3363                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3364                 dmae->src_addr_hi = 0;
3365                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3366                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3367                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3368                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3369                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3370                 dmae->comp_addr_hi = 0;
3371                 dmae->comp_val = 1;
3372
3373                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3374                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3375                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3376                 dmae->opcode = opcode;
3377                 dmae->src_addr_lo = (mac_addr +
3378                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3379                 dmae->src_addr_hi = 0;
3380                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3381                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3382                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3383                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3384                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3385                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3386                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3387                 dmae->comp_addr_hi = 0;
3388                 dmae->comp_val = 1;
3389
3390         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3391
3392                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3393
3394                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3395                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3396                 dmae->opcode = opcode;
3397                 dmae->src_addr_lo = (mac_addr +
3398                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3399                 dmae->src_addr_hi = 0;
3400                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3401                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3402                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3403                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3404                 dmae->comp_addr_hi = 0;
3405                 dmae->comp_val = 1;
3406
3407                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3408                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3409                 dmae->opcode = opcode;
3410                 dmae->src_addr_lo = (mac_addr +
3411                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3412                 dmae->src_addr_hi = 0;
3413                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3414                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3415                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3416                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3417                 dmae->len = 1;
3418                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3419                 dmae->comp_addr_hi = 0;
3420                 dmae->comp_val = 1;
3421
3422                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3423                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3424                 dmae->opcode = opcode;
3425                 dmae->src_addr_lo = (mac_addr +
3426                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3427                 dmae->src_addr_hi = 0;
3428                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3429                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3430                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3431                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3432                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3433                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434                 dmae->comp_addr_hi = 0;
3435                 dmae->comp_val = 1;
3436         }
3437
3438         /* NIG */
3439         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440         dmae->opcode = opcode;
3441         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3442                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3443         dmae->src_addr_hi = 0;
3444         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3445         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3446         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3447         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3448         dmae->comp_addr_hi = 0;
3449         dmae->comp_val = 1;
3450
3451         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3452         dmae->opcode = opcode;
3453         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3454                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3455         dmae->src_addr_hi = 0;
3456         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3457                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3458         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3459                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3460         dmae->len = (2*sizeof(u32)) >> 2;
3461         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3462         dmae->comp_addr_hi = 0;
3463         dmae->comp_val = 1;
3464
3465         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3466         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3467                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3468                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3469 #ifdef __BIG_ENDIAN
3470                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3471 #else
3472                         DMAE_CMD_ENDIANITY_DW_SWAP |
3473 #endif
3474                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3475                         (vn << DMAE_CMD_E1HVN_SHIFT));
3476         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3477                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3478         dmae->src_addr_hi = 0;
3479         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3480                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3481         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3482                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3483         dmae->len = (2*sizeof(u32)) >> 2;
3484         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3485         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3486         dmae->comp_val = DMAE_COMP_VAL;
3487
3488         *stats_comp = 0;
3489 }
3490
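/*
 * Editor's note (summary, not part of the original source): as PMF,
 * this function queues the whole statistics round trip as one DMAE
 * chain - host port/function stats are pushed out to the shmem areas
 * the MCP exported (PCI -> GRC), the active MAC's counters (BMAC or
 * EMAC register blocks) and the NIG counters are pulled into host
 * memory (GRC -> PCI), and only the final NIG command completes to
 * stats_comp with DMAE_COMP_VAL.  bnx2x_hw_stats_post() then launches
 * the chain.
 */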
3491 static void bnx2x_func_stats_init(struct bnx2x *bp)
3492 {
3493         struct dmae_command *dmae = &bp->stats_dmae;
3494         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3495
3496         /* sanity */
3497         if (!bp->func_stx) {
3498                 BNX2X_ERR("BUG!\n");
3499                 return;
3500         }
3501
3502         bp->executer_idx = 0;
3503         memset(dmae, 0, sizeof(struct dmae_command));
3504
3505         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3506                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3507                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3508 #ifdef __BIG_ENDIAN
3509                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3510 #else
3511                         DMAE_CMD_ENDIANITY_DW_SWAP |
3512 #endif
3513                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3514                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3515         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3516         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3517         dmae->dst_addr_lo = bp->func_stx >> 2;
3518         dmae->dst_addr_hi = 0;
3519         dmae->len = sizeof(struct host_func_stats) >> 2;
3520         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3521         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3522         dmae->comp_val = DMAE_COMP_VAL;
3523
3524         *stats_comp = 0;
3525 }
3526
3527 static void bnx2x_stats_start(struct bnx2x *bp)
3528 {
3529         if (bp->port.pmf)
3530                 bnx2x_port_stats_init(bp);
3531
3532         else if (bp->func_stx)
3533                 bnx2x_func_stats_init(bp);
3534
3535         bnx2x_hw_stats_post(bp);
3536         bnx2x_storm_stats_post(bp);
3537 }
3538
3539 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3540 {
3541         bnx2x_stats_comp(bp);
3542         bnx2x_stats_pmf_update(bp);
3543         bnx2x_stats_start(bp);
3544 }
3545
3546 static void bnx2x_stats_restart(struct bnx2x *bp)
3547 {
3548         bnx2x_stats_comp(bp);
3549         bnx2x_stats_start(bp);
3550 }
3551
3552 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3553 {
3554         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3555         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3556         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3557         struct {
3558                 u32 lo;
3559                 u32 hi;
3560         } diff;
3561
3562         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3563         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3564         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3565         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3566         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3567         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3568         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3569         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3570         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3571         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3572         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3573         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3574         UPDATE_STAT64(tx_stat_gt127,
3575                                 tx_stat_etherstatspkts65octetsto127octets);
3576         UPDATE_STAT64(tx_stat_gt255,
3577                                 tx_stat_etherstatspkts128octetsto255octets);
3578         UPDATE_STAT64(tx_stat_gt511,
3579                                 tx_stat_etherstatspkts256octetsto511octets);
3580         UPDATE_STAT64(tx_stat_gt1023,
3581                                 tx_stat_etherstatspkts512octetsto1023octets);
3582         UPDATE_STAT64(tx_stat_gt1518,
3583                                 tx_stat_etherstatspkts1024octetsto1522octets);
3584         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3585         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3586         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3587         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3588         UPDATE_STAT64(tx_stat_gterr,
3589                                 tx_stat_dot3statsinternalmactransmiterrors);
3590         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3591
3592         estats->pause_frames_received_hi =
3593                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3594         estats->pause_frames_received_lo =
3595                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3596
3597         estats->pause_frames_sent_hi =
3598                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3599         estats->pause_frames_sent_lo =
3600                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3601 }
3602
3603 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3604 {
3605         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3606         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3607         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3608
3609         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3610         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3611         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3612         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3613         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3614         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3615         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3616         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3617         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3618         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3619         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3620         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3621         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3622         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3623         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3624         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3625         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3626         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3627         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3628         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3629         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3630         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3631         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3632         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3633         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3634         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3635         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3636         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3637         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3638         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3639         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3640
3641         estats->pause_frames_received_hi =
3642                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3643         estats->pause_frames_received_lo =
3644                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3645         ADD_64(estats->pause_frames_received_hi,
3646                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3647                estats->pause_frames_received_lo,
3648                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3649
3650         estats->pause_frames_sent_hi =
3651                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3652         estats->pause_frames_sent_lo =
3653                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3654         ADD_64(estats->pause_frames_sent_hi,
3655                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3656                estats->pause_frames_sent_lo,
3657                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3658 }
3659
3660 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3661 {
3662         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3663         struct nig_stats *old = &(bp->port.old_nig_stats);
3664         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3665         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3666         struct {
3667                 u32 lo;
3668                 u32 hi;
3669         } diff;
3670         u32 nig_timer_max;
3671
3672         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3673                 bnx2x_bmac_stats_update(bp);
3674
3675         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3676                 bnx2x_emac_stats_update(bp);
3677
3678         else { /* unreached */
3679                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3680                 return -1;
3681         }
3682
3683         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3684                       new->brb_discard - old->brb_discard);
3685         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3686                       new->brb_truncate - old->brb_truncate);
3687
3688         UPDATE_STAT64_NIG(egress_mac_pkt0,
3689                                         etherstatspkts1024octetsto1522octets);
3690         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3691
3692         memcpy(old, new, sizeof(struct nig_stats));
3693
3694         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3695                sizeof(struct mac_stx));
3696         estats->brb_drop_hi = pstats->brb_drop_hi;
3697         estats->brb_drop_lo = pstats->brb_drop_lo;
3698
3699         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3700
3701         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3702         if (nig_timer_max != estats->nig_timer_max) {
3703                 estats->nig_timer_max = nig_timer_max;
3704                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3705         }
3706
3707         return 0;
3708 }
3709
3710 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3711 {
3712         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3713         struct tstorm_per_port_stats *tport =
3714                                         &stats->tstorm_common.port_statistics;
3715         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3716         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3717         int i;
3718
3719         memset(&(fstats->total_bytes_received_hi), 0,
3720                sizeof(struct host_func_stats) - 2*sizeof(u32));
3721         estats->error_bytes_received_hi = 0;
3722         estats->error_bytes_received_lo = 0;
3723         estats->etherstatsoverrsizepkts_hi = 0;
3724         estats->etherstatsoverrsizepkts_lo = 0;
3725         estats->no_buff_discard_hi = 0;
3726         estats->no_buff_discard_lo = 0;
3727
3728         for_each_queue(bp, i) {
3729                 struct bnx2x_fastpath *fp = &bp->fp[i];
3730                 int cl_id = fp->cl_id;
3731                 struct tstorm_per_client_stats *tclient =
3732                                 &stats->tstorm_common.client_statistics[cl_id];
3733                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3734                 struct ustorm_per_client_stats *uclient =
3735                                 &stats->ustorm_common.client_statistics[cl_id];
3736                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3737                 struct xstorm_per_client_stats *xclient =
3738                                 &stats->xstorm_common.client_statistics[cl_id];
3739                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3740                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3741                 u32 diff;
3742
3743                 /* are storm stats valid? */
3744                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3745                                                         bp->stats_counter) {
3746                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3747                            "  xstorm counter (%d) != stats_counter (%d)\n",
3748                            i, le16_to_cpu(xclient->stats_counter), bp->stats_counter);
3749                         return -1;
3750                 }
3751                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3752                                                         bp->stats_counter) {
3753                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3754                            "  tstorm counter (%d) != stats_counter (%d)\n",
3755                            i, le16_to_cpu(tclient->stats_counter), bp->stats_counter);
3756                         return -2;
3757                 }
3758                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3759                                                         bp->stats_counter) {
3760                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3761                            "  ustorm counter (%d) != stats_counter (%d)\n",
3762                            i, le16_to_cpu(uclient->stats_counter), bp->stats_counter);
3763                         return -4;
3764                 }
3765
3766                 qstats->total_bytes_received_hi =
3767                 qstats->valid_bytes_received_hi =
3768                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3769                 qstats->total_bytes_received_lo =
3770                 qstats->valid_bytes_received_lo =
3771                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3772
3773                 qstats->error_bytes_received_hi =
3774                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3775                 qstats->error_bytes_received_lo =
3776                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3777
3778                 ADD_64(qstats->total_bytes_received_hi,
3779                        qstats->error_bytes_received_hi,
3780                        qstats->total_bytes_received_lo,
3781                        qstats->error_bytes_received_lo);
3782
3783                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3784                                         total_unicast_packets_received);
3785                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3786                                         total_multicast_packets_received);
3787                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3788                                         total_broadcast_packets_received);
3789                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3790                                         etherstatsoverrsizepkts);
3791                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3792
3793                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3794                                         total_unicast_packets_received);
3795                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3796                                         total_multicast_packets_received);
3797                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3798                                         total_broadcast_packets_received);
3799                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3800                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3801                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3802
3803                 qstats->total_bytes_transmitted_hi =
3804                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3805                 qstats->total_bytes_transmitted_lo =
3806                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3807
3808                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3809                                         total_unicast_packets_transmitted);
3810                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3811                                         total_multicast_packets_transmitted);
3812                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3813                                         total_broadcast_packets_transmitted);
3814
3815                 old_tclient->checksum_discard = tclient->checksum_discard;
3816                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3817
3818                 ADD_64(fstats->total_bytes_received_hi,
3819                        qstats->total_bytes_received_hi,
3820                        fstats->total_bytes_received_lo,
3821                        qstats->total_bytes_received_lo);
3822                 ADD_64(fstats->total_bytes_transmitted_hi,
3823                        qstats->total_bytes_transmitted_hi,
3824                        fstats->total_bytes_transmitted_lo,
3825                        qstats->total_bytes_transmitted_lo);
3826                 ADD_64(fstats->total_unicast_packets_received_hi,
3827                        qstats->total_unicast_packets_received_hi,
3828                        fstats->total_unicast_packets_received_lo,
3829                        qstats->total_unicast_packets_received_lo);
3830                 ADD_64(fstats->total_multicast_packets_received_hi,
3831                        qstats->total_multicast_packets_received_hi,
3832                        fstats->total_multicast_packets_received_lo,
3833                        qstats->total_multicast_packets_received_lo);
3834                 ADD_64(fstats->total_broadcast_packets_received_hi,
3835                        qstats->total_broadcast_packets_received_hi,
3836                        fstats->total_broadcast_packets_received_lo,
3837                        qstats->total_broadcast_packets_received_lo);
3838                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3839                        qstats->total_unicast_packets_transmitted_hi,
3840                        fstats->total_unicast_packets_transmitted_lo,
3841                        qstats->total_unicast_packets_transmitted_lo);
3842                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3843                        qstats->total_multicast_packets_transmitted_hi,
3844                        fstats->total_multicast_packets_transmitted_lo,
3845                        qstats->total_multicast_packets_transmitted_lo);
3846                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3847                        qstats->total_broadcast_packets_transmitted_hi,
3848                        fstats->total_broadcast_packets_transmitted_lo,
3849                        qstats->total_broadcast_packets_transmitted_lo);
3850                 ADD_64(fstats->valid_bytes_received_hi,
3851                        qstats->valid_bytes_received_hi,
3852                        fstats->valid_bytes_received_lo,
3853                        qstats->valid_bytes_received_lo);
3854
3855                 ADD_64(estats->error_bytes_received_hi,
3856                        qstats->error_bytes_received_hi,
3857                        estats->error_bytes_received_lo,
3858                        qstats->error_bytes_received_lo);
3859                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3860                        qstats->etherstatsoverrsizepkts_hi,
3861                        estats->etherstatsoverrsizepkts_lo,
3862                        qstats->etherstatsoverrsizepkts_lo);
3863                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3864                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3865         }
3866
3867         ADD_64(fstats->total_bytes_received_hi,
3868                estats->rx_stat_ifhcinbadoctets_hi,
3869                fstats->total_bytes_received_lo,
3870                estats->rx_stat_ifhcinbadoctets_lo);
3871
3872         memcpy(estats, &(fstats->total_bytes_received_hi),
3873                sizeof(struct host_func_stats) - 2*sizeof(u32));
3874
3875         ADD_64(estats->etherstatsoverrsizepkts_hi,
3876                estats->rx_stat_dot3statsframestoolong_hi,
3877                estats->etherstatsoverrsizepkts_lo,
3878                estats->rx_stat_dot3statsframestoolong_lo);
3879         ADD_64(estats->error_bytes_received_hi,
3880                estats->rx_stat_ifhcinbadoctets_hi,
3881                estats->error_bytes_received_lo,
3882                estats->rx_stat_ifhcinbadoctets_lo);
3883
3884         if (bp->port.pmf) {
3885                 estats->mac_filter_discard =
3886                                 le32_to_cpu(tport->mac_filter_discard);
3887                 estats->xxoverflow_discard =
3888                                 le32_to_cpu(tport->xxoverflow_discard);
3889                 estats->brb_truncate_discard =
3890                                 le32_to_cpu(tport->brb_truncate_discard);
3891                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3892         }
3893
3894         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3895
3896         bp->stats_pending = 0;
3897
3898         return 0;
3899 }
3900
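/*
 * Editor's note (not part of the original source): the
 * "(u16)(counter + 1) == bp->stats_counter" checks above work because
 * bnx2x_storm_stats_post() hands the FW the pre-increment value of
 * stats_counter and the FW echoes the counter it serviced.  An
 * off-by-one match therefore means "this snapshot answers the latest
 * query"; any stale storm aborts the whole update so half-consistent
 * queues are never folded into the totals.
 */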
3901 static void bnx2x_net_stats_update(struct bnx2x *bp)
3902 {
3903         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3904         struct net_device_stats *nstats = &bp->dev->stats;
3905         int i;
3906
3907         nstats->rx_packets =
3908                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3909                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3910                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3911
3912         nstats->tx_packets =
3913                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3914                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3915                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3916
3917         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3918
3919         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3920
3921         nstats->rx_dropped = estats->mac_discard;
3922         for_each_queue(bp, i)
3923                 nstats->rx_dropped +=
3924                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3925
3926         nstats->tx_dropped = 0;
3927
3928         nstats->multicast =
3929                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3930
3931         nstats->collisions =
3932                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3933
3934         nstats->rx_length_errors =
3935                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3936                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3937         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3938                                  bnx2x_hilo(&estats->brb_truncate_hi);
3939         nstats->rx_crc_errors =
3940                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3941         nstats->rx_frame_errors =
3942                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3943         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3944         nstats->rx_missed_errors = estats->xxoverflow_discard;
3945
3946         nstats->rx_errors = nstats->rx_length_errors +
3947                             nstats->rx_over_errors +
3948                             nstats->rx_crc_errors +
3949                             nstats->rx_frame_errors +
3950                             nstats->rx_fifo_errors +
3951                             nstats->rx_missed_errors;
3952
3953         nstats->tx_aborted_errors =
3954                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3955                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3956         nstats->tx_carrier_errors =
3957                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3958         nstats->tx_fifo_errors = 0;
3959         nstats->tx_heartbeat_errors = 0;
3960         nstats->tx_window_errors = 0;
3961
3962         nstats->tx_errors = nstats->tx_aborted_errors +
3963                             nstats->tx_carrier_errors +
3964             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3965 }
3966
3967 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3968 {
3969         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3970         int i;
3971
3972         estats->driver_xoff = 0;
3973         estats->rx_err_discard_pkt = 0;
3974         estats->rx_skb_alloc_failed = 0;
3975         estats->hw_csum_err = 0;
3976         for_each_queue(bp, i) {
3977                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3978
3979                 estats->driver_xoff += qstats->driver_xoff;
3980                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3981                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3982                 estats->hw_csum_err += qstats->hw_csum_err;
3983         }
3984 }
3985
3986 static void bnx2x_stats_update(struct bnx2x *bp)
3987 {
3988         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3989
3990         if (*stats_comp != DMAE_COMP_VAL)
3991                 return;
3992
3993         if (bp->port.pmf)
3994                 bnx2x_hw_stats_update(bp);
3995
3996         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3997                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3998                 bnx2x_panic();
3999                 return;
4000         }
4001
4002         bnx2x_net_stats_update(bp);
4003         bnx2x_drv_stats_update(bp);
4004
4005         if (bp->msglevel & NETIF_MSG_TIMER) {
4006                 struct tstorm_per_client_stats *old_tclient =
4007                                                         &bp->fp->old_tclient;
4008                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4009                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4010                 struct net_device_stats *nstats = &bp->dev->stats;
4011                 int i;
4012
4013                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4014                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4015                                   "  tx pkt (%lx)\n",
4016                        bnx2x_tx_avail(bp->fp),
4017                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4018                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4019                                   "  rx pkt (%lx)\n",
4020                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4021                              bp->fp->rx_comp_cons),
4022                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4023                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4024                                   "brb truncate %u\n",
4025                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4026                        qstats->driver_xoff,
4027                        estats->brb_drop_lo, estats->brb_truncate_lo);
4028                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4029                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4030                         "mac_discard %u  mac_filter_discard %u  "
4031                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4032                         "ttl0_discard %u\n",
4033                        le32_to_cpu(old_tclient->checksum_discard),
4034                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4035                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4036                        estats->mac_discard, estats->mac_filter_discard,
4037                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4038                        le32_to_cpu(old_tclient->ttl0_discard));
4039
4040                 for_each_queue(bp, i) {
4041                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4042                                bnx2x_fp(bp, i, tx_pkt),
4043                                bnx2x_fp(bp, i, rx_pkt),
4044                                bnx2x_fp(bp, i, rx_calls));
4045                 }
4046         }
4047
4048         bnx2x_hw_stats_post(bp);
4049         bnx2x_storm_stats_post(bp);
4050 }
4051
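/* Prepare (without posting) the DMAE descriptors that flush the final
 * host_port_stats/host_func_stats snapshots out to the GRC addresses held
 * in port.port_stx/func_stx.  The last descriptor completes to stats_comp
 * so bnx2x_stats_comp() can wait for the whole batch. */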
4052 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4053 {
4054         struct dmae_command *dmae;
4055         u32 opcode;
4056         int loader_idx = PMF_DMAE_C(bp);
4057         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4058
4059         bp->executer_idx = 0;
4060
4061         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4062                   DMAE_CMD_C_ENABLE |
4063                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4064 #ifdef __BIG_ENDIAN
4065                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4066 #else
4067                   DMAE_CMD_ENDIANITY_DW_SWAP |
4068 #endif
4069                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4070                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4071
4072         if (bp->port.port_stx) {
4073
4074                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4075                 if (bp->func_stx)
4076                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4077                 else
4078                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4079                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4080                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4081                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4082                 dmae->dst_addr_hi = 0;
4083                 dmae->len = sizeof(struct host_port_stats) >> 2;
4084                 if (bp->func_stx) {
4085                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4086                         dmae->comp_addr_hi = 0;
4087                         dmae->comp_val = 1;
4088                 } else {
4089                         dmae->comp_addr_lo =
4090                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4091                         dmae->comp_addr_hi =
4092                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4093                         dmae->comp_val = DMAE_COMP_VAL;
4094
4095                         *stats_comp = 0;
4096                 }
4097         }
4098
4099         if (bp->func_stx) {
4100
4101                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4102                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4103                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4104                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4105                 dmae->dst_addr_lo = bp->func_stx >> 2;
4106                 dmae->dst_addr_hi = 0;
4107                 dmae->len = sizeof(struct host_func_stats) >> 2;
4108                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4109                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4110                 dmae->comp_val = DMAE_COMP_VAL;
4111
4112                 *stats_comp = 0;
4113         }
4114 }
4115
4116 static void bnx2x_stats_stop(struct bnx2x *bp)
4117 {
4118         int update = 0;
4119
4120         bnx2x_stats_comp(bp);
4121
4122         if (bp->port.pmf)
4123                 update = (bnx2x_hw_stats_update(bp) == 0);
4124
4125         update |= (bnx2x_storm_stats_update(bp) == 0);
4126
4127         if (update) {
4128                 bnx2x_net_stats_update(bp);
4129
4130                 if (bp->port.pmf)
4131                         bnx2x_port_stats_stop(bp);
4132
4133                 bnx2x_hw_stats_post(bp);
4134                 bnx2x_stats_comp(bp);
4135         }
4136 }
4137
4138 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4139 {
4140 }
4141
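/* Statistics state machine: indexed by [current state][event], each entry
 * gives the handler to run and the state to enter next.  For example, a
 * LINK_UP event while DISABLED runs bnx2x_stats_start() and moves the
 * machine to STATS_STATE_ENABLED. */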
4142 static const struct {
4143         void (*action)(struct bnx2x *bp);
4144         enum bnx2x_stats_state next_state;
4145 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4146 /* state        event   */
4147 {
4148 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4149 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4150 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4151 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4152 },
4153 {
4154 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4155 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4156 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4157 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4158 }
4159 };
4160
4161 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4162 {
4163         enum bnx2x_stats_state state = bp->stats_state;
4164
4165         bnx2x_stats_stm[state][event].action(bp);
4166         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4167
4168         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4169                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4170                    state, event, bp->stats_state);
4171 }
4172
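/* Periodic timer (re-armed every current_interval jiffies): services the
 * first queue when the "poll" debug option is set, maintains the driver
 * pulse heartbeat that the MCP firmware echoes back, and triggers a
 * statistics update while the device is up. */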
4173 static void bnx2x_timer(unsigned long data)
4174 {
4175         struct bnx2x *bp = (struct bnx2x *) data;
4176
4177         if (!netif_running(bp->dev))
4178                 return;
4179
4180         if (atomic_read(&bp->intr_sem) != 0)
4181                 goto timer_restart;
4182
4183         if (poll) {
4184                 struct bnx2x_fastpath *fp = &bp->fp[0];
4185
4186                 bnx2x_tx_int(fp);
4187                 /* drain up to 1000 Rx completions per tick in poll mode */
4188                 bnx2x_rx_int(fp, 1000);
4189         }
4190
4191         if (!BP_NOMCP(bp)) {
4192                 int func = BP_FUNC(bp);
4193                 u32 drv_pulse;
4194                 u32 mcp_pulse;
4195
4196                 ++bp->fw_drv_pulse_wr_seq;
4197                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4198                 /* TBD - add SYSTEM_TIME */
4199                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4200                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4201
4202                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4203                              MCP_PULSE_SEQ_MASK);
4204                 /* The delta between driver pulse and mcp response
4205                  * should be 1 (before mcp response) or 0 (after mcp response)
4206                  */
4207                 if ((drv_pulse != mcp_pulse) &&
4208                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4209                         /* someone lost a heartbeat... */
4210                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4211                                   drv_pulse, mcp_pulse);
4212                 }
4213         }
4214
4215         if ((bp->state == BNX2X_STATE_OPEN) ||
4216             (bp->state == BNX2X_STATE_DISABLED))
4217                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4218
4219 timer_restart:
4220         mod_timer(&bp->timer, jiffies + bp->current_interval);
4221 }
4222
4223 /* end of Statistics */
4224
4225 /* nic init */
4226
4227 /*
4228  * nic init service functions
4229  */
4230
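/* The per-queue (non-default) status blocks live in USTORM and CSTORM
 * internal memory: bnx2x_zero_sb() clears both halves, bnx2x_init_sb()
 * programs the host DMA address of each half and initially disables host
 * coalescing on every index. */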
4231 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4232 {
4233         int port = BP_PORT(bp);
4234
4235         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4236                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4237                         sizeof(struct ustorm_status_block)/4);
4238         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4239                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4240                         sizeof(struct cstorm_status_block)/4);
4241 }
4242
4243 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4244                           dma_addr_t mapping, int sb_id)
4245 {
4246         int port = BP_PORT(bp);
4247         int func = BP_FUNC(bp);
4248         int index;
4249         u64 section;
4250
4251         /* USTORM */
4252         section = ((u64)mapping) + offsetof(struct host_status_block,
4253                                             u_status_block);
4254         sb->u_status_block.status_block_id = sb_id;
4255
4256         REG_WR(bp, BAR_USTRORM_INTMEM +
4257                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4258         REG_WR(bp, BAR_USTRORM_INTMEM +
4259                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4260                U64_HI(section));
4261         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4262                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4263
4264         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4265                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4266                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4267
4268         /* CSTORM */
4269         section = ((u64)mapping) + offsetof(struct host_status_block,
4270                                             c_status_block);
4271         sb->c_status_block.status_block_id = sb_id;
4272
4273         REG_WR(bp, BAR_CSTRORM_INTMEM +
4274                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4275         REG_WR(bp, BAR_CSTRORM_INTMEM +
4276                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4277                U64_HI(section));
4278         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4279                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4280
4281         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4282                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4283                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4284
4285         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4286 }
4287
4288 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4289 {
4290         int func = BP_FUNC(bp);
4291
4292         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4293                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4294                         sizeof(struct tstorm_def_status_block)/4);
4295         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4296                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4297                         sizeof(struct ustorm_def_status_block)/4);
4298         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4299                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4300                         sizeof(struct cstorm_def_status_block)/4);
4301         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4302                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4303                         sizeof(struct xstorm_def_status_block)/4);
4304 }
4305
4306 static void bnx2x_init_def_sb(struct bnx2x *bp,
4307                               struct host_def_status_block *def_sb,
4308                               dma_addr_t mapping, int sb_id)
4309 {
4310         int port = BP_PORT(bp);
4311         int func = BP_FUNC(bp);
4312         int index, val, reg_offset;
4313         u64 section;
4314
4315         /* ATTN */
4316         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4317                                             atten_status_block);
4318         def_sb->atten_status_block.status_block_id = sb_id;
4319
4320         bp->attn_state = 0;
4321
4322         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4323                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4324
4325         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4326                 bp->attn_group[index].sig[0] = REG_RD(bp,
4327                                                      reg_offset + 0x10*index);
4328                 bp->attn_group[index].sig[1] = REG_RD(bp,
4329                                                reg_offset + 0x4 + 0x10*index);
4330                 bp->attn_group[index].sig[2] = REG_RD(bp,
4331                                                reg_offset + 0x8 + 0x10*index);
4332                 bp->attn_group[index].sig[3] = REG_RD(bp,
4333                                                reg_offset + 0xc + 0x10*index);
4334         }
4335
4336         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4337                              HC_REG_ATTN_MSG0_ADDR_L);
4338
4339         REG_WR(bp, reg_offset, U64_LO(section));
4340         REG_WR(bp, reg_offset + 4, U64_HI(section));
4341
4342         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4343
4344         val = REG_RD(bp, reg_offset);
4345         val |= sb_id;
4346         REG_WR(bp, reg_offset, val);
4347
4348         /* USTORM */
4349         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4350                                             u_def_status_block);
4351         def_sb->u_def_status_block.status_block_id = sb_id;
4352
4353         REG_WR(bp, BAR_USTRORM_INTMEM +
4354                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4355         REG_WR(bp, BAR_USTRORM_INTMEM +
4356                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4357                U64_HI(section));
4358         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4359                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4360
4361         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4362                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4363                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4364
4365         /* CSTORM */
4366         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4367                                             c_def_status_block);
4368         def_sb->c_def_status_block.status_block_id = sb_id;
4369
4370         REG_WR(bp, BAR_CSTRORM_INTMEM +
4371                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4372         REG_WR(bp, BAR_CSTRORM_INTMEM +
4373                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4374                U64_HI(section));
4375         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4376                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4377
4378         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4379                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4380                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4381
4382         /* TSTORM */
4383         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4384                                             t_def_status_block);
4385         def_sb->t_def_status_block.status_block_id = sb_id;
4386
4387         REG_WR(bp, BAR_TSTRORM_INTMEM +
4388                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4389         REG_WR(bp, BAR_TSTRORM_INTMEM +
4390                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4391                U64_HI(section));
4392         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4393                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4394
4395         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4396                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4397                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4398
4399         /* XSTORM */
4400         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4401                                             x_def_status_block);
4402         def_sb->x_def_status_block.status_block_id = sb_id;
4403
4404         REG_WR(bp, BAR_XSTRORM_INTMEM +
4405                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4406         REG_WR(bp, BAR_XSTRORM_INTMEM +
4407                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4408                U64_HI(section));
4409         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4410                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4411
4412         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4413                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4414                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4415
4416         bp->stats_pending = 0;
4417         bp->set_mac_pending = 0;
4418
4419         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4420 }
4421
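/* Program the host-coalescing timeouts for the Rx and Tx CQ indices of
 * every queue.  The division by 12 apparently converts the user-visible
 * microsecond values into the HC's coarser timer units; a resulting value
 * of 0 simply disables coalescing on that index via the SB_HC_DISABLE
 * offset. */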
4422 static void bnx2x_update_coalesce(struct bnx2x *bp)
4423 {
4424         int port = BP_PORT(bp);
4425         int i;
4426
4427         for_each_queue(bp, i) {
4428                 int sb_id = bp->fp[i].sb_id;
4429
4430                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4431                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4432                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4433                                                     U_SB_ETH_RX_CQ_INDEX),
4434                         bp->rx_ticks/12);
4435                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4436                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4437                                                      U_SB_ETH_RX_CQ_INDEX),
4438                          (bp->rx_ticks/12) ? 0 : 1);
4439
4440                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4441                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4442                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4443                                                     C_SB_ETH_TX_CQ_INDEX),
4444                         bp->tx_ticks/12);
4445                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4446                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4447                                                      C_SB_ETH_TX_CQ_INDEX),
4448                          (bp->tx_ticks/12) ? 0 : 1);
4449         }
4450 }
4451
4452 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4453                                        struct bnx2x_fastpath *fp, int last)
4454 {
4455         int i;
4456
4457         for (i = 0; i < last; i++) {
4458                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4459                 struct sk_buff *skb = rx_buf->skb;
4460
4461                 if (skb == NULL) {
4462                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4463                         continue;
4464                 }
4465
4466                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4467                         pci_unmap_single(bp->pdev,
4468                                          pci_unmap_addr(rx_buf, mapping),
4469                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4470
4471                 dev_kfree_skb(skb);
4472                 rx_buf->skb = NULL;
4473         }
4474 }
4475
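/* Set up the Rx side of every queue: the buffer-descriptor (BD) ring, the
 * completion (CQE) ring and, when TPA is enabled, the SGE ring and skb
 * pool used for aggregation.  Each ring spans several pages whose last
 * elements are reserved as "next page" pointers, chained below. */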
4476 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4477 {
4478         int func = BP_FUNC(bp);
4479         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4480                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4481         u16 ring_prod, cqe_ring_prod;
4482         int i, j;
4483
4484         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4485         DP(NETIF_MSG_IFUP,
4486            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4487
4488         if (bp->flags & TPA_ENABLE_FLAG) {
4489
4490                 for_each_rx_queue(bp, j) {
4491                         struct bnx2x_fastpath *fp = &bp->fp[j];
4492
4493                         for (i = 0; i < max_agg_queues; i++) {
4494                                 fp->tpa_pool[i].skb =
4495                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4496                                 if (!fp->tpa_pool[i].skb) {
4497                                         BNX2X_ERR("Failed to allocate TPA "
4498                                                   "skb pool for queue[%d] - "
4499                                                   "disabling TPA on this "
4500                                                   "queue!\n", j);
4501                                         bnx2x_free_tpa_pool(bp, fp, i);
4502                                         fp->disable_tpa = 1;
4503                                         break;
4504                                 }
4505                                 /* note: this queue's pool, not fp[0]'s */
4506                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4507                                                    mapping, 0);
4508                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4509                         }
4510                 }
4511         }
4512
4513         for_each_rx_queue(bp, j) {
4514                 struct bnx2x_fastpath *fp = &bp->fp[j];
4515
4516                 fp->rx_bd_cons = 0;
4517                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4518                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4519
4520                 /* "next page" elements initialization */
4521                 /* SGE ring */
4522                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4523                         struct eth_rx_sge *sge;
4524
4525                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4526                         sge->addr_hi =
4527                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4528                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4529                         sge->addr_lo =
4530                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4531                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4532                 }
4533
4534                 bnx2x_init_sge_ring_bit_mask(fp);
4535
4536                 /* RX BD ring */
4537                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4538                         struct eth_rx_bd *rx_bd;
4539
4540                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4541                         rx_bd->addr_hi =
4542                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4543                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4544                         rx_bd->addr_lo =
4545                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4546                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4547                 }
4548
4549                 /* CQ ring */
4550                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4551                         struct eth_rx_cqe_next_page *nextpg;
4552
4553                         nextpg = (struct eth_rx_cqe_next_page *)
4554                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4555                         nextpg->addr_hi =
4556                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4557                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4558                         nextpg->addr_lo =
4559                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4560                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4561                 }
4562
4563                 /* Allocate SGEs and initialize the ring elements */
4564                 for (i = 0, ring_prod = 0;
4565                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4566
4567                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4568                                 BNX2X_ERR("was only able to allocate "
4569                                           "%d rx sges\n", i);
4570                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4571                                 /* Cleanup already allocated elements */
4572                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4573                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4574                                 fp->disable_tpa = 1;
4575                                 ring_prod = 0;
4576                                 break;
4577                         }
4578                         ring_prod = NEXT_SGE_IDX(ring_prod);
4579                 }
4580                 fp->rx_sge_prod = ring_prod;
4581
4582                 /* Allocate BDs and initialize BD ring */
4583                 fp->rx_comp_cons = 0;
4584                 cqe_ring_prod = ring_prod = 0;
4585                 for (i = 0; i < bp->rx_ring_size; i++) {
4586                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4587                                 BNX2X_ERR("was only able to allocate "
4588                                           "%d rx skbs on queue[%d]\n", i, j);
4589                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4590                                 break;
4591                         }
4592                         ring_prod = NEXT_RX_IDX(ring_prod);
4593                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4594                         WARN_ON(ring_prod <= i);
4595                 }
4596
4597                 fp->rx_bd_prod = ring_prod;
4598                 /* must not have more available CQEs than BDs */
4599                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4600                                        cqe_ring_prod);
4601                 fp->rx_pkt = fp->rx_calls = 0;
4602
4603                 /* Warning!
4604                  * this will generate an interrupt (to the TSTORM)
4605                  * must only be done after chip is initialized
4606                  */
4607                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4608                                      fp->rx_sge_prod);
4609                 if (j != 0)
4610                         continue;
4611
4612                 REG_WR(bp, BAR_USTRORM_INTMEM +
4613                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4614                        U64_LO(fp->rx_comp_mapping));
4615                 REG_WR(bp, BAR_USTRORM_INTMEM +
4616                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4617                        U64_HI(fp->rx_comp_mapping));
4618         }
4619 }
4620
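/* Tx rings: as on the Rx side, the last BD of every page is turned into a
 * link chaining it to the following page. */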
4621 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4622 {
4623         int i, j;
4624
4625         for_each_tx_queue(bp, j) {
4626                 struct bnx2x_fastpath *fp = &bp->fp[j];
4627
4628                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4629                         struct eth_tx_bd *tx_bd =
4630                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4631
4632                         tx_bd->addr_hi =
4633                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4634                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4635                         tx_bd->addr_lo =
4636                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4637                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4638                 }
4639
4640                 fp->tx_pkt_prod = 0;
4641                 fp->tx_pkt_cons = 0;
4642                 fp->tx_bd_prod = 0;
4643                 fp->tx_bd_cons = 0;
4644                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4645                 fp->tx_pkt = 0;
4646         }
4647 }
4648
4649 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4650 {
4651         int func = BP_FUNC(bp);
4652
4653         spin_lock_init(&bp->spq_lock);
4654
4655         bp->spq_left = MAX_SPQ_PENDING;
4656         bp->spq_prod_idx = 0;
4657         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4658         bp->spq_prod_bd = bp->spq;
4659         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4660
4661         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4662                U64_LO(bp->spq_mapping));
4663         REG_WR(bp,
4664                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4665                U64_HI(bp->spq_mapping));
4666
4667         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4668                bp->spq_prod_idx);
4669 }
4670
4671 static void bnx2x_init_context(struct bnx2x *bp)
4672 {
4673         int i;
4674
4675         for_each_queue(bp, i) {
4676                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4677                 struct bnx2x_fastpath *fp = &bp->fp[i];
4678                 u8 cl_id = fp->cl_id;
4679                 u8 sb_id = fp->sb_id;
4680
4681                 context->ustorm_st_context.common.sb_index_numbers =
4682                                                 BNX2X_RX_SB_INDEX_NUM;
4683                 context->ustorm_st_context.common.clientId = cl_id;
4684                 context->ustorm_st_context.common.status_block_id = sb_id;
4685                 context->ustorm_st_context.common.flags =
4686                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4687                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4688                 context->ustorm_st_context.common.statistics_counter_id =
4689                                                 cl_id;
4690                 context->ustorm_st_context.common.mc_alignment_log_size =
4691                                                 BNX2X_RX_ALIGN_SHIFT;
4692                 context->ustorm_st_context.common.bd_buff_size =
4693                                                 bp->rx_buf_size;
4694                 context->ustorm_st_context.common.bd_page_base_hi =
4695                                                 U64_HI(fp->rx_desc_mapping);
4696                 context->ustorm_st_context.common.bd_page_base_lo =
4697                                                 U64_LO(fp->rx_desc_mapping);
4698                 if (!fp->disable_tpa) {
4699                         context->ustorm_st_context.common.flags |=
4700                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4701                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4702                         context->ustorm_st_context.common.sge_buff_size =
4703                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4704                                          (u32)0xffff);
4705                         context->ustorm_st_context.common.sge_page_base_hi =
4706                                                 U64_HI(fp->rx_sge_mapping);
4707                         context->ustorm_st_context.common.sge_page_base_lo =
4708                                                 U64_LO(fp->rx_sge_mapping);
4709                 }
4710
4711                 context->ustorm_ag_context.cdu_usage =
4712                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4713                                                CDU_REGION_NUMBER_UCM_AG,
4714                                                ETH_CONNECTION_TYPE);
4715
4716                 context->xstorm_st_context.tx_bd_page_base_hi =
4717                                                 U64_HI(fp->tx_desc_mapping);
4718                 context->xstorm_st_context.tx_bd_page_base_lo =
4719                                                 U64_LO(fp->tx_desc_mapping);
4720                 context->xstorm_st_context.db_data_addr_hi =
4721                                                 U64_HI(fp->tx_prods_mapping);
4722                 context->xstorm_st_context.db_data_addr_lo =
4723                                                 U64_LO(fp->tx_prods_mapping);
4724                 context->xstorm_st_context.statistics_data = (cl_id |
4725                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4726                 context->cstorm_st_context.sb_index_number =
4727                                                 C_SB_ETH_TX_CQ_INDEX;
4728                 context->cstorm_st_context.status_block_id = sb_id;
4729
4730                 context->xstorm_ag_context.cdu_reserved =
4731                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4732                                                CDU_REGION_NUMBER_XCM_AG,
4733                                                ETH_CONNECTION_TYPE);
4734         }
4735 }
4736
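/* RSS indirection table: each of its TSTORM_INDIRECTION_TABLE_SIZE slots
 * maps an RSS hash result onto a client ID, spread round-robin over the
 * Rx queues.  With, say, four Rx queues and a leading cl_id of 0 the
 * table reads 0,1,2,3,0,1,2,3,... */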
4737 static void bnx2x_init_ind_table(struct bnx2x *bp)
4738 {
4739         int func = BP_FUNC(bp);
4740         int i;
4741
4742         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4743                 return;
4744
4745         DP(NETIF_MSG_IFUP,
4746            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4747         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4748                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4749                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4750                         bp->fp->cl_id + (i % bp->num_rx_queues));
4751 }
4752
4753 static void bnx2x_set_client_config(struct bnx2x *bp)
4754 {
4755         struct tstorm_eth_client_config tstorm_client = {0};
4756         int port = BP_PORT(bp);
4757         int i;
4758
4759         tstorm_client.mtu = bp->dev->mtu;
4760         tstorm_client.config_flags =
4761                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4762                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4763 #ifdef BCM_VLAN
4764         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4765                 tstorm_client.config_flags |=
4766                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4767                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4768         }
4769 #endif
4770
4771         if (bp->flags & TPA_ENABLE_FLAG) {
4772                 tstorm_client.max_sges_for_packet =
4773                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4774                 tstorm_client.max_sges_for_packet =
4775                         ((tstorm_client.max_sges_for_packet +
4776                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4777                         PAGES_PER_SGE_SHIFT;
4778
4779                 tstorm_client.config_flags |=
4780                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4781         }
4782
4783         for_each_queue(bp, i) {
4784                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4785
4786                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4787                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4788                        ((u32 *)&tstorm_client)[0]);
4789                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4790                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4791                        ((u32 *)&tstorm_client)[1]);
4792         }
4793
4794         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4795            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4796 }
4797
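/* Push the current Rx filtering mode down to the TSTORM: "mask" carries
 * this function's bit within the per-port drop-all/accept-all vectors, so
 * every client can be configured independently. */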
4798 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4799 {
4800         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4801         int mode = bp->rx_mode;
4802         int mask = (1 << BP_L_ID(bp));
4803         int func = BP_FUNC(bp);
4804         int i;
4805
4806         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4807
4808         switch (mode) {
4809         case BNX2X_RX_MODE_NONE: /* no Rx */
4810                 tstorm_mac_filter.ucast_drop_all = mask;
4811                 tstorm_mac_filter.mcast_drop_all = mask;
4812                 tstorm_mac_filter.bcast_drop_all = mask;
4813                 break;
4814
4815         case BNX2X_RX_MODE_NORMAL:
4816                 tstorm_mac_filter.bcast_accept_all = mask;
4817                 break;
4818
4819         case BNX2X_RX_MODE_ALLMULTI:
4820                 tstorm_mac_filter.mcast_accept_all = mask;
4821                 tstorm_mac_filter.bcast_accept_all = mask;
4822                 break;
4823
4824         case BNX2X_RX_MODE_PROMISC:
4825                 tstorm_mac_filter.ucast_accept_all = mask;
4826                 tstorm_mac_filter.mcast_accept_all = mask;
4827                 tstorm_mac_filter.bcast_accept_all = mask;
4828                 break;
4829
4830         default:
4831                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4832                 break;
4833         }
4834
4835         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4836                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4837                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4838                        ((u32 *)&tstorm_mac_filter)[i]);
4839
4840 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4841                    ((u32 *)&tstorm_mac_filter)[i]); */
4842         }
4843
4844         if (mode != BNX2X_RX_MODE_NONE)
4845                 bnx2x_set_client_config(bp);
4846 }
4847
4848 static void bnx2x_init_internal_common(struct bnx2x *bp)
4849 {
4850         int i;
4851
4852         if (bp->flags & TPA_ENABLE_FLAG) {
4853                 struct tstorm_eth_tpa_exist tpa = {0};
4854
4855                 tpa.tpa_exist = 1;
4856
4857                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4858                        ((u32 *)&tpa)[0]);
4859                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4860                        ((u32 *)&tpa)[1]);
4861         }
4862
4863         /* Zero this manually as its initialization is
4864            currently missing in the initTool */
4865         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4866                 REG_WR(bp, BAR_USTRORM_INTMEM +
4867                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4868 }
4869
4870 static void bnx2x_init_internal_port(struct bnx2x *bp)
4871 {
4872         int port = BP_PORT(bp);
4873
4874         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4875         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4876         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4877         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4878 }
4879
4880 /* Calculates the sum of vn_min_rates.
4881    It's needed for further normalizing of the min_rates.
4882    Returns:
4883      sum of vn_min_rates.
4884        or
4885      0 - if all the min_rates are 0.
4886      In the latter case the fairness algorithm should be deactivated.
4887      If not all min_rates are zero, the zero ones are raised to DEF_MIN_RATE.
4888  */
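/* Hypothetical worked example: min rates of a/0/b/0 across the four VNs
 * yield vn_weight_sum = a + DEF_MIN_RATE + b + DEF_MIN_RATE; only when all
 * four are zero is the sum forced back to 0 and fairness disabled. */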
4889 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4890 {
4891         int all_zero = 1;
4892         int port = BP_PORT(bp);
4893         int vn;
4894
4895         bp->vn_weight_sum = 0;
4896         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4897                 int func = 2*vn + port;
4898                 u32 vn_cfg =
4899                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4900                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4901                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4902
4903                 /* Skip hidden vns */
4904                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4905                         continue;
4906
4907                 /* If min rate is zero - set it to 1 */
4908                 if (!vn_min_rate)
4909                         vn_min_rate = DEF_MIN_RATE;
4910                 else
4911                         all_zero = 0;
4912
4913                 bp->vn_weight_sum += vn_min_rate;
4914         }
4915
4916         /* ... only if all min rates are zeros - disable fairness */
4917         if (all_zero)
4918                 bp->vn_weight_sum = 0;
4919 }
4920
4921 static void bnx2x_init_internal_func(struct bnx2x *bp)
4922 {
4923         struct tstorm_eth_function_common_config tstorm_config = {0};
4924         struct stats_indication_flags stats_flags = {0};
4925         int port = BP_PORT(bp);
4926         int func = BP_FUNC(bp);
4927         int i, j;
4928         u32 offset;
4929         u16 max_agg_size;
4930
4931         if (is_multi(bp)) {
4932                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4933                 tstorm_config.rss_result_mask = MULTI_MASK;
4934         }
4935         if (IS_E1HMF(bp))
4936                 tstorm_config.config_flags |=
4937                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4938
4939         tstorm_config.leading_client_id = BP_L_ID(bp);
4940
4941         REG_WR(bp, BAR_TSTRORM_INTMEM +
4942                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4943                (*(u32 *)&tstorm_config));
4944
4945         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4946         bnx2x_set_storm_rx_mode(bp);
4947
4948         for_each_queue(bp, i) {
4949                 u8 cl_id = bp->fp[i].cl_id;
4950
4951                 /* reset xstorm per client statistics */
4952                 offset = BAR_XSTRORM_INTMEM +
4953                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4954                 for (j = 0;
4955                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4956                         REG_WR(bp, offset + j*4, 0);
4957
4958                 /* reset tstorm per client statistics */
4959                 offset = BAR_TSTRORM_INTMEM +
4960                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4961                 for (j = 0;
4962                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4963                         REG_WR(bp, offset + j*4, 0);
4964
4965                 /* reset ustorm per client statistics */
4966                 offset = BAR_USTRORM_INTMEM +
4967                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4968                 for (j = 0;
4969                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4970                         REG_WR(bp, offset + j*4, 0);
4971         }
4972
4973         /* Init statistics related context */
4974         stats_flags.collect_eth = 1;
4975
4976         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4977                ((u32 *)&stats_flags)[0]);
4978         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4979                ((u32 *)&stats_flags)[1]);
4980
4981         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4982                ((u32 *)&stats_flags)[0]);
4983         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4984                ((u32 *)&stats_flags)[1]);
4985
4986         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4987                ((u32 *)&stats_flags)[0]);
4988         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4989                ((u32 *)&stats_flags)[1]);
4990
4991         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4992                ((u32 *)&stats_flags)[0]);
4993         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4994                ((u32 *)&stats_flags)[1]);
4995
4996         REG_WR(bp, BAR_XSTRORM_INTMEM +
4997                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4998                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4999         REG_WR(bp, BAR_XSTRORM_INTMEM +
5000                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5001                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5002
5003         REG_WR(bp, BAR_TSTRORM_INTMEM +
5004                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5005                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5006         REG_WR(bp, BAR_TSTRORM_INTMEM +
5007                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5008                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5009
5010         REG_WR(bp, BAR_USTRORM_INTMEM +
5011                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5012                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5013         REG_WR(bp, BAR_USTRORM_INTMEM +
5014                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5015                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5016
5017         if (CHIP_IS_E1H(bp)) {
5018                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5019                         IS_E1HMF(bp));
5020                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5021                         IS_E1HMF(bp));
5022                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5023                         IS_E1HMF(bp));
5024                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5025                         IS_E1HMF(bp));
5026
5027                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5028                          bp->e1hov);
5029         }
5030
5031         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5032         max_agg_size =
5033                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5034                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5035                     (u32)0xffff);
5036         for_each_rx_queue(bp, i) {
5037                 struct bnx2x_fastpath *fp = &bp->fp[i];
5038
5039                 REG_WR(bp, BAR_USTRORM_INTMEM +
5040                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5041                        U64_LO(fp->rx_comp_mapping));
5042                 REG_WR(bp, BAR_USTRORM_INTMEM +
5043                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5044                        U64_HI(fp->rx_comp_mapping));
5045
5046                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5047                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5048                          max_agg_size);
5049         }
5050
5051         /* dropless flow control */
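        /* Presumably: once the number of free BDs/CQEs (and SGEs, when TPA
         * is active) drops below the *_thr_low watermarks the FW starts
         * generating pause frames, releasing them again above *_thr_high. */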
5052         if (CHIP_IS_E1H(bp)) {
5053                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5054
5055                 rx_pause.bd_thr_low = 250;
5056                 rx_pause.cqe_thr_low = 250;
5057                 rx_pause.cos = 1;
5058                 rx_pause.sge_thr_low = 0;
5059                 rx_pause.bd_thr_high = 350;
5060                 rx_pause.cqe_thr_high = 350;
5061                 rx_pause.sge_thr_high = 0;
5062
5063                 for_each_rx_queue(bp, i) {
5064                         struct bnx2x_fastpath *fp = &bp->fp[i];
5065
5066                         if (!fp->disable_tpa) {
5067                                 rx_pause.sge_thr_low = 150;
5068                                 rx_pause.sge_thr_high = 250;
5069                         }
5070
5072                         offset = BAR_USTRORM_INTMEM +
5073                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5074                                                                    fp->cl_id);
5075                         for (j = 0;
5076                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5077                              j++)
5078                                 REG_WR(bp, offset + j*4,
5079                                        ((u32 *)&rx_pause)[j]);
5080                 }
5081         }
5082
5083         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5084
5085         /* Init rate shaping and fairness contexts */
5086         if (IS_E1HMF(bp)) {
5087                 int vn;
5088
5089                 /* During init there is no active link
5090                    Until link is up, set link rate to 10Gbps */
5091                 bp->link_vars.line_speed = SPEED_10000;
5092                 bnx2x_init_port_minmax(bp);
5093
5094                 bnx2x_calc_vn_weight_sum(bp);
5095
5096                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5097                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5098
5099                 /* Enable rate shaping and fairness */
5100                 bp->cmng.flags.cmng_enables =
5101                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5102                 if (bp->vn_weight_sum)
5103                         bp->cmng.flags.cmng_enables |=
5104                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5105                 else
5106                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5107                            "  fairness will be disabled\n");
5108         } else {
5109                 /* rate shaping and fairness are disabled */
5110                 DP(NETIF_MSG_IFUP,
5111                    "single function mode  minmax will be disabled\n");
5112         }
5113
5115         /* Store it to internal memory */
5116         if (bp->port.pmf)
5117                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5118                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5119                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5120                                ((u32 *)(&bp->cmng))[i]);
5121 }
5122
5123 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5124 {
5125         switch (load_code) {
5126         case FW_MSG_CODE_DRV_LOAD_COMMON:
5127                 bnx2x_init_internal_common(bp);
5128                 /* no break */
5129
5130         case FW_MSG_CODE_DRV_LOAD_PORT:
5131                 bnx2x_init_internal_port(bp);
5132                 /* no break */
5133
5134         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5135                 bnx2x_init_internal_func(bp);
5136                 break;
5137
5138         default:
5139                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5140                 break;
5141         }
5142 }
5143
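/* Bring the NIC data structures up in dependency order: fastpath status
 * blocks first, then the default SB, coalescing, the Rx/Tx/slowpath rings,
 * contexts and internal memory; interrupts are unmasked only at the end. */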
5144 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5145 {
5146         int i;
5147
5148         for_each_queue(bp, i) {
5149                 struct bnx2x_fastpath *fp = &bp->fp[i];
5150
5151                 fp->bp = bp;
5152                 fp->state = BNX2X_FP_STATE_CLOSED;
5153                 fp->index = i;
5154                 fp->cl_id = BP_L_ID(bp) + i;
5155                 fp->sb_id = fp->cl_id;
5156                 DP(NETIF_MSG_IFUP,
5157                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5158                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5159                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5160                               fp->sb_id);
5161                 bnx2x_update_fpsb_idx(fp);
5162         }
5163
5164         /* ensure status block indices were read */
5165         rmb();
5166
5168         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5169                           DEF_SB_ID);
5170         bnx2x_update_dsb_idx(bp);
5171         bnx2x_update_coalesce(bp);
5172         bnx2x_init_rx_rings(bp);
5173         bnx2x_init_tx_ring(bp);
5174         bnx2x_init_sp_ring(bp);
5175         bnx2x_init_context(bp);
5176         bnx2x_init_internal(bp, load_code);
5177         bnx2x_init_ind_table(bp);
5178         bnx2x_stats_init(bp);
5179
5180         /* At this point, we are ready for interrupts */
5181         atomic_set(&bp->intr_sem, 0);
5182
5183         /* flush all before enabling interrupts */
5184         mb();
5185         mmiowb();
5186
5187         bnx2x_int_enable(bp);
5188 }
5189
5190 /* end of nic init */
5191
5192 /*
5193  * gzip service functions
5194  */
5195
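/* The firmware blobs ship gzip-compressed; they are inflated through a
 * zlib stream into a DMA-coherent buffer (gunzip_buf) so the result can
 * then be pushed to the chip. */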
5196 static int bnx2x_gunzip_init(struct bnx2x *bp)
5197 {
5198         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5199                                               &bp->gunzip_mapping);
5200         if (bp->gunzip_buf  == NULL)
5201                 goto gunzip_nomem1;
5202
5203         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5204         if (bp->strm  == NULL)
5205                 goto gunzip_nomem2;
5206
5207         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5208                                       GFP_KERNEL);
5209         if (bp->strm->workspace == NULL)
5210                 goto gunzip_nomem3;
5211
5212         return 0;
5213
5214 gunzip_nomem3:
5215         kfree(bp->strm);
5216         bp->strm = NULL;
5217
5218 gunzip_nomem2:
5219         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5220                             bp->gunzip_mapping);
5221         bp->gunzip_buf = NULL;
5222
5223 gunzip_nomem1:
5224         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5225                " decompression\n", bp->dev->name);
5226         return -ENOMEM;
5227 }
5228
5229 static void bnx2x_gunzip_end(struct bnx2x *bp)
5230 {
5231         kfree(bp->strm->workspace);
5232
5233         kfree(bp->strm);
5234         bp->strm = NULL;
5235
5236         if (bp->gunzip_buf) {
5237                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5238                                     bp->gunzip_mapping);
5239                 bp->gunzip_buf = NULL;
5240         }
5241 }
5242
5243 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5244 {
5245         int n, rc;
5246
5247         /* check gzip header */
5248         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5249                 BNX2X_ERR("Bad gzip header\n");
5250                 return -EINVAL;
5251         }
5252
5253         n = 10;
5254
5255 #define FNAME                           0x8
5256
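        /* per RFC 1952 the fixed gzip header is 10 bytes; if the FNAME flag
         * (bit 3 of the FLG byte, zbuf[3]) is set it is followed by a
         * NUL-terminated original file name, which is skipped here */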
5257         if (zbuf[3] & FNAME)
5258                 while ((zbuf[n++] != 0) && (n < len));
5259
5260         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5261         bp->strm->avail_in = len - n;
5262         bp->strm->next_out = bp->gunzip_buf;
5263         bp->strm->avail_out = FW_BUF_SIZE;
5264
5265         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5266         if (rc != Z_OK)
5267                 return rc;
5268
5269         rc = zlib_inflate(bp->strm, Z_FINISH);
5270         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5271                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5272                        bp->dev->name, bp->strm->msg);
5273
5274         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5275         if (bp->gunzip_outlen & 0x3)
5276                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5277                                     " gunzip_outlen (%d) not aligned\n",
5278                        bp->dev->name, bp->gunzip_outlen);
5279         bp->gunzip_outlen >>= 2;
5280
5281         zlib_inflateEnd(bp->strm);
5282
5283         if (rc == Z_STREAM_END)
5284                 return 0;
5285
5286         return rc;
5287 }
5288
5289 /* nic load/unload */
5290
5291 /*
5292  * General service functions
5293  */
5294
5295 /* send a NIG loopback debug packet */
5296 static void bnx2x_lb_pckt(struct bnx2x *bp)
5297 {
5298         u32 wb_write[3];
5299
5300         /* Ethernet source and destination addresses */
5301         wb_write[0] = 0x55555555;
5302         wb_write[1] = 0x55555555;
5303         wb_write[2] = 0x20;             /* SOP */
5304         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5305
5306         /* NON-IP protocol */
5307         wb_write[0] = 0x09000000;
5308         wb_write[1] = 0x55555555;
5309         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5310         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5311 }
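
/* Each REG_WR_DMAE above carries 8 bytes of packet data plus a control
 * dword (0x20 = SOP, 0x10 = EOP per the comments), so the two writes
 * emit one 16-byte debug frame, which matches the 0x10 octet count the
 * memory test below expects per packet.
 */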
5312
5313 /* some of the internal memories
5314  * are not directly readable from the driver,
5315  * so to test them we send debug packets
5316  */
5317 static int bnx2x_int_mem_test(struct bnx2x *bp)
5318 {
5319         int factor;
5320         int count, i;
5321         u32 val = 0;
5322
5323         if (CHIP_REV_IS_FPGA(bp))
5324                 factor = 120;
5325         else if (CHIP_REV_IS_EMUL(bp))
5326                 factor = 200;
5327         else
5328                 factor = 1;
5329
5330         DP(NETIF_MSG_HW, "start part1\n");
5331
5332         /* Disable inputs of parser neighbor blocks */
5333         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5334         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5335         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5336         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5337
5338         /*  Write 0 to parser credits for CFC search request */
5339         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5340
5341         /* send Ethernet packet */
5342         bnx2x_lb_pckt(bp);
5343
5344         /* TODO: should the NIG statistics be reset here? */
5345         /* Wait until NIG register shows 1 packet of size 0x10 */
5346         count = 1000 * factor;
5347         while (count) {
5348
5349                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5350                 val = *bnx2x_sp(bp, wb_data[0]);
5351                 if (val == 0x10)
5352                         break;
5353
5354                 msleep(10);
5355                 count--;
5356         }
5357         if (val != 0x10) {
5358                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5359                 return -1;
5360         }
5361
5362         /* Wait until PRS register shows 1 packet */
5363         count = 1000 * factor;
5364         while (count) {
5365                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5366                 if (val == 1)
5367                         break;
5368
5369                 msleep(10);
5370                 count--;
5371         }
5372         if (val != 0x1) {
5373                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5374                 return -2;
5375         }
5376
5377         /* Reset and init BRB, PRS */
5378         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5379         msleep(50);
5380         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5381         msleep(50);
5382         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5383         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5384
5385         DP(NETIF_MSG_HW, "part2\n");
5386
5387         /* Disable inputs of parser neighbor blocks */
5388         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5389         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5390         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5391         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5392
5393         /* Write 0 to parser credits for CFC search request */
5394         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5395
5396         /* send 10 Ethernet packets */
5397         for (i = 0; i < 10; i++)
5398                 bnx2x_lb_pckt(bp);
5399
5400         /* Wait until NIG register shows 10 + 1
5401            packets of size 11*0x10 = 0xb0 */
5402         count = 1000 * factor;
5403         while (count) {
5404
5405                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5406                 val = *bnx2x_sp(bp, wb_data[0]);
5407                 if (val == 0xb0)
5408                         break;
5409
5410                 msleep(10);
5411                 count--;
5412         }
5413         if (val != 0xb0) {
5414                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5415                 return -3;
5416         }
5417
5418         /* Wait until PRS register shows 2 packets */
5419         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5420         if (val != 2)
5421                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5422
5423         /* Write 1 to parser credits for CFC search request */
5424         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5425
5426         /* Wait until PRS register shows 3 packets */
5427         msleep(10 * factor);
5429         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5430         if (val != 3)
5431                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5432
5433         /* clear NIG EOP FIFO */
5434         for (i = 0; i < 11; i++)
5435                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5436         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5437         if (val != 1) {
5438                 BNX2X_ERR("clear of NIG failed\n");
5439                 return -4;
5440         }
5441
5442         /* Reset and init BRB, PRS, NIG */
5443         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5444         msleep(50);
5445         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5446         msleep(50);
5447         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5448         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5449 #ifndef BCM_ISCSI
5450         /* set NIC mode */
5451         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5452 #endif
5453
5454         /* Enable inputs of parser neighbor blocks */
5455         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5456         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5457         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5458         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5459
5460         DP(NETIF_MSG_HW, "done\n");
5461
5462         return 0; /* OK */
5463 }
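
/* Recap of the test above: isolate the parser (neighbor inputs off,
 * CFC search credit zeroed), inject one 16-byte loopback frame and
 * check the BRB octet and PRS packet counters; reset BRB/PRS and
 * repeat with ten frames, restore one CFC search credit (PRS should
 * then show a third packet), drain the NIG EOP FIFO and re-enable
 * the parser neighbor inputs.
 */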
5464
5465 static void enable_blocks_attention(struct bnx2x *bp)
5466 {
5467         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5468         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5469         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5470         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5471         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5472         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5473         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5474         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5475         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5476 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5477 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5478         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5479         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5480         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5481 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5482 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5483         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5484         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5485         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5486         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5487 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5488 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5489         if (CHIP_REV_IS_FPGA(bp))
5490                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5491         else
5492                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5493         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5494         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5495         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5496 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5497 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5498         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5499         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5500 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5501         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5502 }
5503
5504
5505 static void bnx2x_reset_common(struct bnx2x *bp)
5506 {
5507         /* reset_common */
5508         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5509                0xd3ffff7f);
5510         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5511 }
5512
5513 static int bnx2x_init_common(struct bnx2x *bp)
5514 {
5515         u32 val, i;
5516
5517         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5518
5519         bnx2x_reset_common(bp);
5520         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5521         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5522
5523         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5524         if (CHIP_IS_E1H(bp))
5525                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5526
5527         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5528         msleep(30);
5529         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5530
5531         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5532         if (CHIP_IS_E1(bp)) {
5533                 /* enable HW interrupt from PXP on USDM overflow
5534                    bit 16 on INT_MASK_0 */
5535                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5536         }
5537
5538         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5539         bnx2x_init_pxp(bp);
5540
5541 #ifdef __BIG_ENDIAN
5542         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5543         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5544         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5545         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5546         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5547         /* make sure this value is 0 */
5548         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5549
5550 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5551         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5552         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5553         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5554         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5555 #endif
5556
5557         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5558 #ifdef BCM_ISCSI
5559         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5560         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5561         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5562 #endif
5563
5564         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5565                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5566
5567         /* let the HW do its magic ... */
5568         msleep(100);
5569         /* finish PXP init */
5570         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5571         if (val != 1) {
5572                 BNX2X_ERR("PXP2 CFG failed\n");
5573                 return -EBUSY;
5574         }
5575         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5576         if (val != 1) {
5577                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5578                 return -EBUSY;
5579         }
5580
5581         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5582         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5583
5584         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5585
5586         /* clean the DMAE memory */
5587         bp->dmae_ready = 1;
5588         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5589
5590         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5591         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5592         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5593         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5594
5595         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5596         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5597         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5598         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5599
5600         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5601         /* soft reset pulse */
5602         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5603         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5604
5605 #ifdef BCM_ISCSI
5606         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5607 #endif
5608
5609         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5610         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5611         if (!CHIP_REV_IS_SLOW(bp)) {
5612                 /* enable hw interrupt from doorbell Q */
5613                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5614         }
5615
5616         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5617         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5618         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5619         /* set NIC mode */
5620         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5621         if (CHIP_IS_E1H(bp))
5622                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5623
5624         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5625         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5626         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5627         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5628
5629         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5630         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5631         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5632         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5633
5634         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5635         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5636         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5637         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5638
5639         /* sync semi rtc */
5640         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5641                0x80000000);
5642         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5643                0x80000000);
5644
5645         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5646         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5647         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5648
5649         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5650         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5651                 REG_WR(bp, i, 0xc0cac01a);
5652                 /* TODO: replace with something meaningful */
5653         }
5654         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5655         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5656
5657         if (sizeof(union cdu_context) != 1024)
5658                 /* we currently assume that a context is 1024 bytes */
5659                 printk(KERN_ALERT PFX "please adjust the size of"
5660                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5661
5662         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5663         val = (4 << 24) + (0 << 12) + 1024;
5664         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5665         if (CHIP_IS_E1(bp)) {
5666                 /* !!! fix pxp client credit until excel update */
5667                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5668                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5669         }
5670
5671         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5672         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5673         /* enable context validation interrupt from CFC */
5674         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5675
5676         /* set the thresholds to prevent CFC/CDU race */
5677         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5678
5679         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5680         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5681
5682         /* PXPCS COMMON comes here */
5683         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5684         /* Reset PCIE errors for debug */
5685         REG_WR(bp, 0x2814, 0xffffffff);
5686         REG_WR(bp, 0x3820, 0xffffffff);
5687
5688         /* EMAC0 COMMON comes here */
5689         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5690         /* EMAC1 COMMON comes here */
5691         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5692         /* DBU COMMON comes here */
5693         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5694         /* DBG COMMON comes here */
5695         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5696
5697         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5698         if (CHIP_IS_E1H(bp)) {
5699                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5700                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5701         }
5702
5703         if (CHIP_REV_IS_SLOW(bp))
5704                 msleep(200);
5705
5706         /* finish CFC init */
5707         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5708         if (val != 1) {
5709                 BNX2X_ERR("CFC LL_INIT failed\n");
5710                 return -EBUSY;
5711         }
5712         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5713         if (val != 1) {
5714                 BNX2X_ERR("CFC AC_INIT failed\n");
5715                 return -EBUSY;
5716         }
5717         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5718         if (val != 1) {
5719                 BNX2X_ERR("CFC CAM_INIT failed\n");
5720                 return -EBUSY;
5721         }
5722         REG_WR(bp, CFC_REG_DEBUG0, 0);
5723
5724         /* read the NIG statistics
5725            to see if this is our first up since powerup */
5726         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5727         val = *bnx2x_sp(bp, wb_data[0]);
5728
5729         /* do internal memory self test */
5730         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5731                 BNX2X_ERR("internal mem self test failed\n");
5732                 return -EBUSY;
5733         }
5734
5735         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5736         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5737         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5738         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5739                 bp->port.need_hw_lock = 1;
5740                 break;
5741
5742         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5743                 /* Fan failure is indicated by SPIO 5 */
5744                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5745                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5746
5747                 /* set to active low mode */
5748                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5749                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5750                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5751                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5752
5753                 /* enable interrupt to signal the IGU */
5754                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5755                 val |= (1 << MISC_REGISTERS_SPIO_5);
5756                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5757                 break;
5758
5759         default:
5760                 break;
5761         }
5762
5763         /* clear PXP2 attentions */
5764         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5765
5766         enable_blocks_attention(bp);
5767
5768         if (!BP_NOMCP(bp)) {
5769                 bnx2x_acquire_phy_lock(bp);
5770                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5771                 bnx2x_release_phy_lock(bp);
5772         } else
5773                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5774
5775         return 0;
5776 }
5777
5778 static int bnx2x_init_port(struct bnx2x *bp)
5779 {
5780         int port = BP_PORT(bp);
5781         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5782         u32 low, high;
5783         u32 val;
#ifdef BCM_ISCSI
        /* used only by the BCM_ISCSI blocks below; i counts ILT lines
         * (Port0 lines start at 1, Port1 at 385 after the first i++,
         * matching the comments below; start value inferred from them) */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5784
5785         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5786
5787         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5788
5789         /* Port PXP comes here */
5790         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5791         /* Port PXP2 comes here */
5792         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5793 #ifdef BCM_ISCSI
5794         /* Port0  1
5795          * Port1  385 */
5796         i++;
5797         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5798         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5799         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5800         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5801
5802         /* Port0  2
5803          * Port1  386 */
5804         i++;
5805         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5806         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5807         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5808         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5809
5810         /* Port0  3
5811          * Port1  387 */
5812         i++;
5813         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5814         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5815         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5816         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5817 #endif
5818         /* Port CMs come here */
5819         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5820
5821         /* Port QM comes here */
5822 #ifdef BCM_ISCSI
5823         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5824         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5825
5826         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5827 #endif
5828         /* Port DQ comes here */
5829         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5830
5831         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5832         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5833                 /* no pause for emulation and FPGA */
5834                 low = 0;
5835                 high = 513;
5836         } else {
5837                 if (IS_E1HMF(bp))
5838                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5839                 else if (bp->dev->mtu > 4096) {
5840                         if (bp->flags & ONE_PORT_FLAG)
5841                                 low = 160;
5842                         else {
5843                                 val = bp->dev->mtu;
5844                                 /* (24*1024 + val*4)/256 */
5845                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5846                         }
5847                 } else
5848                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5849                 high = low + 56;        /* 14*1024/256 */
5850         }
5851         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5852         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
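
        /* Worked example of the formula above: for an MTU of 9000,
         * (24*1024 + 9000*4)/256 = 60576/256 = 236.6, which the integer
         * form 96 + 9000/64 + 1 rounds up to low = 237; high is then
         * 237 + 56 = 293 (low plus 14*1024/256).
         */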
5853
5854
5855         /* Port PRS comes here */
5856         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5857         /* Port TSDM comes here */
5858         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5859         /* Port CSDM comes here */
5860         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5861         /* Port USDM comes here */
5862         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5863         /* Port XSDM comes here */
5864         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5865
5866         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5867         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5868         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5869         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5870
5871         /* Port UPB comes here */
5872         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5873         /* Port XPB comes here */
5874         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5875
5876         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5877
5878         /* configure PBF to work without PAUSE for MTU 9000 */
5879         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5880
5881         /* update threshold */
5882         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5883         /* update init credit */
5884         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5885
5886         /* probe changes */
5887         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5888         msleep(5);
5889         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5890
5891 #ifdef BCM_ISCSI
5892         /* tell the searcher where the T2 table is */
5893         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5894
5895         wb_write[0] = U64_LO(bp->t2_mapping);
5896         wb_write[1] = U64_HI(bp->t2_mapping);
5897         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5898         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5899         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5900         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5901
5902         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5903         /* Port SRCH comes here */
5904 #endif
5905         /* Port CDU comes here */
5906         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5907         /* Port CFC comes here */
5908         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5909
5910         if (CHIP_IS_E1(bp)) {
5911                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5912                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5913         }
5914         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5915
5916         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5917         /* init aeu_mask_attn_func_0/1:
5918          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5919          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5920          *             bits 4-7 are used for "per vn group attention" */
5921         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5922                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5923
5924         /* Port PXPCS comes here */
5925         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5926         /* Port EMAC0 comes here */
5927         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5928         /* Port EMAC1 comes here */
5929         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5930         /* Port DBU comes here */
5931         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5932         /* Port DBG comes here */
5933         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5934
5935         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5936
5937         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5938
5939         if (CHIP_IS_E1H(bp)) {
5940                 /* 0x2 disable e1hov, 0x1 enable */
5941                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5942                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5943
5944                 /* support pause requests from USDM, TSDM and BRB */
5945                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5946
5948                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5949                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5950                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5952         }
5953
5954         /* Port MCP comes here */
5955         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5956         /* Port DMAE comes here */
5957         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5958
5959         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5960         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5961                 {
5962                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5963
5964                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5965                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5966
5967                 /* The GPIO should be swapped if the swap register is
5968                    set and active */
5969                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5970                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5971
5972                 /* Select function upon port-swap configuration */
5973                 if (port == 0) {
5974                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5975                         aeu_gpio_mask = (swap_val && swap_override) ?
5976                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5977                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5978                 } else {
5979                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5980                         aeu_gpio_mask = (swap_val && swap_override) ?
5981                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5982                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5983                 }
5984                 val = REG_RD(bp, offset);
5985                 /* add GPIO3 to group */
5986                 val |= aeu_gpio_mask;
5987                 REG_WR(bp, offset, val);
5988                 }
5989                 break;
5990
5991         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5992                 /* add SPIO 5 to group 0 */
5993                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5994                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5995                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5996                 break;
5997
5998         default:
5999                 break;
6000         }
6001
6002         bnx2x__link_reset(bp);
6003
6004         return 0;
6005 }
6006
6007 #define ILT_PER_FUNC            (768/2)
6008 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6009 /* the phys address is shifted right 12 bits and has a
6010    1=valid bit added as the 53rd bit;
6011    then since this is a wide register(TM)
6012    we split it into two 32 bit writes
6013  */
6014 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6015 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6016 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6017 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
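
/* Worked example: for a hypothetical DMA address x = 0x0000123456789000,
 * ONCHIP_ADDR1(x) = (x >> 12) & 0xffffffff = 0x23456789 and
 * ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44) = 0x100001, i.e. the valid
 * bit lands at bit 20 of the high dword, the 53rd bit of the
 * reassembled page address.  Likewise PXP_ONE_ILT(5) = (5 << 10) | 5 =
 * 0x1405, a one-line range whose first and last ILT lines are both 5.
 */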
6018
6019 #define CNIC_ILT_LINES          0
6020
6021 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6022 {
6023         int reg;
6024
6025         if (CHIP_IS_E1H(bp))
6026                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6027         else /* E1 */
6028                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6029
6030         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6031 }
6032
6033 static int bnx2x_init_func(struct bnx2x *bp)
6034 {
6035         int port = BP_PORT(bp);
6036         int func = BP_FUNC(bp);
6037         u32 addr, val;
6038         int i;
6039
6040         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6041
6042         /* set MSI reconfigure capability */
6043         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6044         val = REG_RD(bp, addr);
6045         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6046         REG_WR(bp, addr, val);
6047
6048         i = FUNC_ILT_BASE(func);
6049
6050         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6051         if (CHIP_IS_E1H(bp)) {
6052                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6053                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6054         } else /* E1 */
6055                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6056                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6057
6058
6059         if (CHIP_IS_E1H(bp)) {
6060                 for (i = 0; i < 9; i++)
6061                         bnx2x_init_block(bp,
6062                                          cm_blocks[i], FUNC0_STAGE + func);
6063
6064                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6065                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6066         }
6067
6068         /* HC init per function */
6069         if (CHIP_IS_E1H(bp)) {
6070                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6071
6072                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6073                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6074         }
6075         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6076
6077         /* Reset PCIE errors for debug */
6078         REG_WR(bp, 0x2114, 0xffffffff);
6079         REG_WR(bp, 0x2120, 0xffffffff);
6080
6081         return 0;
6082 }
6083
6084 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6085 {
6086         int i, rc = 0;
6087
6088         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6089            BP_FUNC(bp), load_code);
6090
6091         bp->dmae_ready = 0;
6092         mutex_init(&bp->dmae_mutex);
6093         bnx2x_gunzip_init(bp);
6094
6095         switch (load_code) {
6096         case FW_MSG_CODE_DRV_LOAD_COMMON:
6097                 rc = bnx2x_init_common(bp);
6098                 if (rc)
6099                         goto init_hw_err;
6100                 /* no break */
6101
6102         case FW_MSG_CODE_DRV_LOAD_PORT:
6103                 bp->dmae_ready = 1;
6104                 rc = bnx2x_init_port(bp);
6105                 if (rc)
6106                         goto init_hw_err;
6107                 /* no break */
6108
6109         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6110                 bp->dmae_ready = 1;
6111                 rc = bnx2x_init_func(bp);
6112                 if (rc)
6113                         goto init_hw_err;
6114                 break;
6115
6116         default:
6117                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6118                 break;
6119         }
6120
6121         if (!BP_NOMCP(bp)) {
6122                 int func = BP_FUNC(bp);
6123
6124                 bp->fw_drv_pulse_wr_seq =
6125                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6126                                  DRV_PULSE_SEQ_MASK);
6127                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6128                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6129                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6130         } else
6131                 bp->func_stx = 0;
6132
6133         /* this needs to be done before gunzip end */
6134         bnx2x_zero_def_sb(bp);
6135         for_each_queue(bp, i)
6136                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6137
6138 init_hw_err:
6139         bnx2x_gunzip_end(bp);
6140
6141         return rc;
6142 }
6143
6144 /* send the MCP a request, block until there is a reply */
6145 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6146 {
6147         int func = BP_FUNC(bp);
6148         u32 seq = ++bp->fw_seq;
6149         u32 rc = 0;
6150         u32 cnt = 1;
6151         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6152
6153         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6154         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6155
6156         do {
6157                 /* let the FW do its magic ... */
6158                 msleep(delay);
6159
6160                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6161
6162                 /* Give the FW up to 2 seconds (200*10ms) */
6163         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6164
6165         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6166            cnt*delay, rc, seq);
6167
6168         /* is this a reply to our command? */
6169         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6170                 rc &= FW_MSG_CODE_MASK;
6171
6172         } else {
6173                 /* FW BUG! */
6174                 BNX2X_ERR("FW failed to respond!\n");
6175                 bnx2x_fw_dump(bp);
6176                 rc = 0;
6177         }
6178
6179         return rc;
6180 }
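
/* Worked example of the handshake above: with a hypothetical command
 * 0x10000000 and fw_seq advanced to 5, (command | seq) = 0x10000005 is
 * written to drv_mb_header, and fw_mb_header is polled until its
 * FW_MSG_SEQ_NUMBER_MASK bits also read 5; the FW_MSG_CODE_MASK bits
 * of that word are the reply.  With the normal 10ms delay and up to
 * 200 polls, the wait is bounded at roughly two seconds.
 */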
6181
6182 static void bnx2x_free_mem(struct bnx2x *bp)
6183 {
6184
6185 #define BNX2X_PCI_FREE(x, y, size) \
6186         do { \
6187                 if (x) { \
6188                         pci_free_consistent(bp->pdev, size, x, y); \
6189                         x = NULL; \
6190                         y = 0; \
6191                 } \
6192         } while (0)
6193
6194 #define BNX2X_FREE(x) \
6195         do { \
6196                 if (x) { \
6197                         vfree(x); \
6198                         x = NULL; \
6199                 } \
6200         } while (0)
6201
6202         int i;
6203
6204         /* fastpath */
6205         /* Common */
6206         for_each_queue(bp, i) {
6207
6208                 /* status blocks */
6209                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6210                                bnx2x_fp(bp, i, status_blk_mapping),
6211                                sizeof(struct host_status_block) +
6212                                sizeof(struct eth_tx_db_data));
6213         }
6214         /* Rx */
6215         for_each_rx_queue(bp, i) {
6216
6217                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6218                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6219                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6220                                bnx2x_fp(bp, i, rx_desc_mapping),
6221                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6222
6223                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6224                                bnx2x_fp(bp, i, rx_comp_mapping),
6225                                sizeof(struct eth_fast_path_rx_cqe) *
6226                                NUM_RCQ_BD);
6227
6228                 /* SGE ring */
6229                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6230                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6231                                bnx2x_fp(bp, i, rx_sge_mapping),
6232                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6233         }
6234         /* Tx */
6235         for_each_tx_queue(bp, i) {
6236
6237                 /* fastpath tx rings: tx_buf tx_desc */
6238                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6239                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6240                                bnx2x_fp(bp, i, tx_desc_mapping),
6241                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6242         }
6243         /* end of fastpath */
6244
6245         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6246                        sizeof(struct host_def_status_block));
6247
6248         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6249                        sizeof(struct bnx2x_slowpath));
6250
6251 #ifdef BCM_ISCSI
6252         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6253         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6254         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6255         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6256 #endif
6257         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6258
6259 #undef BNX2X_PCI_FREE
6260 #undef BNX2X_FREE
6261 }
6262
6263 static int bnx2x_alloc_mem(struct bnx2x *bp)
6264 {
6265
6266 #define BNX2X_PCI_ALLOC(x, y, size) \
6267         do { \
6268                 x = pci_alloc_consistent(bp->pdev, size, y); \
6269                 if (x == NULL) \
6270                         goto alloc_mem_err; \
6271                 memset(x, 0, size); \
6272         } while (0)
6273
6274 #define BNX2X_ALLOC(x, size) \
6275         do { \
6276                 x = vmalloc(size); \
6277                 if (x == NULL) \
6278                         goto alloc_mem_err; \
6279                 memset(x, 0, size); \
6280         } while (0)
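
/* Both helpers use the do { ... } while (0) idiom so an invocation acts
 * as a single statement; e.g. the (illustrative) use
 *
 *         if (need_ring)
 *                 BNX2X_ALLOC(ring, size);
 *         else
 *                 ring = NULL;
 *
 * parses correctly, while a bare { ... } block would end the if early
 * at the caller's trailing semicolon and orphan the else.
 */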
6281
6282         int i;
6283
6284         /* fastpath */
6285         /* Common */
6286         for_each_queue(bp, i) {
6287                 bnx2x_fp(bp, i, bp) = bp;
6288
6289                 /* status blocks */
6290                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6291                                 &bnx2x_fp(bp, i, status_blk_mapping),
6292                                 sizeof(struct host_status_block) +
6293                                 sizeof(struct eth_tx_db_data));
6294         }
6295         /* Rx */
6296         for_each_rx_queue(bp, i) {
6297
6298                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6299                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6300                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6301                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6302                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6303                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6304
6305                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6306                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6307                                 sizeof(struct eth_fast_path_rx_cqe) *
6308                                 NUM_RCQ_BD);
6309
6310                 /* SGE ring */
6311                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6312                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6313                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6314                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6315                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6316         }
6317         /* Tx */
6318         for_each_tx_queue(bp, i) {
6319
6320                 bnx2x_fp(bp, i, hw_tx_prods) =
6321                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6322
6323                 bnx2x_fp(bp, i, tx_prods_mapping) =
6324                                 bnx2x_fp(bp, i, status_blk_mapping) +
6325                                 sizeof(struct host_status_block);
6326
6327                 /* fastpath tx rings: tx_buf tx_desc */
6328                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6329                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6330                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6331                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6332                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6333         }
6334         /* end of fastpath */
6335
6336         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6337                         sizeof(struct host_def_status_block));
6338
6339         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6340                         sizeof(struct bnx2x_slowpath));
6341
6342 #ifdef BCM_ISCSI
6343         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6344
6345         /* Initialize T1 */
6346         for (i = 0; i < 64*1024; i += 64) {
6347                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6348                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6349         }
6350
6351         /* allocate searcher T2 table:
6352            we allocate 1/4 of the allocation count for T2
6353            (which is not entered into the ILT) */
6354         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6355
6356         /* Initialize T2 */
6357         for (i = 0; i < 16*1024; i += 64)
6358                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6359
6360         /* now fixup the last line in the block to point to the next block */
6361         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6362
6363         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6364         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6365
6366         /* QM queues (128*MAX_CONN) */
6367         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6368 #endif
6369
6370         /* Slow path ring */
6371         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6372
6373         return 0;
6374
6375 alloc_mem_err:
6376         bnx2x_free_mem(bp);
6377         return -ENOMEM;
6378
6379 #undef BNX2X_PCI_ALLOC
6380 #undef BNX2X_ALLOC
6381 }
6382
6383 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6384 {
6385         int i;
6386
6387         for_each_tx_queue(bp, i) {
6388                 struct bnx2x_fastpath *fp = &bp->fp[i];
6389
6390                 u16 bd_cons = fp->tx_bd_cons;
6391                 u16 sw_prod = fp->tx_pkt_prod;
6392                 u16 sw_cons = fp->tx_pkt_cons;
6393
6394                 while (sw_cons != sw_prod) {
6395                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6396                         sw_cons++;
6397                 }
6398         }
6399 }
6400
6401 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6402 {
6403         int i, j;
6404
6405         for_each_rx_queue(bp, j) {
6406                 struct bnx2x_fastpath *fp = &bp->fp[j];
6407
6408                 for (i = 0; i < NUM_RX_BD; i++) {
6409                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6410                         struct sk_buff *skb = rx_buf->skb;
6411
6412                         if (skb == NULL)
6413                                 continue;
6414
6415                         pci_unmap_single(bp->pdev,
6416                                          pci_unmap_addr(rx_buf, mapping),
6417                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6418
6419                         rx_buf->skb = NULL;
6420                         dev_kfree_skb(skb);
6421                 }
6422                 if (!fp->disable_tpa)
6423                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6424                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6425                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6426         }
6427 }
6428
6429 static void bnx2x_free_skbs(struct bnx2x *bp)
6430 {
6431         bnx2x_free_tx_skbs(bp);
6432         bnx2x_free_rx_skbs(bp);
6433 }
6434
6435 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6436 {
6437         int i, offset = 1;
6438
6439         free_irq(bp->msix_table[0].vector, bp->dev);
6440         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6441            bp->msix_table[0].vector);
6442
6443         for_each_queue(bp, i) {
6444                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6445                    "state %x\n", i, bp->msix_table[i + offset].vector,
6446                    bnx2x_fp(bp, i, state));
6447
6448                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6449         }
6450 }
6451
6452 static void bnx2x_free_irq(struct bnx2x *bp)
6453 {
6454         if (bp->flags & USING_MSIX_FLAG) {
6455                 bnx2x_free_msix_irqs(bp);
6456                 pci_disable_msix(bp->pdev);
6457                 bp->flags &= ~USING_MSIX_FLAG;
6458
6459         } else if (bp->flags & USING_MSI_FLAG) {
6460                 free_irq(bp->pdev->irq, bp->dev);
6461                 pci_disable_msi(bp->pdev);
6462                 bp->flags &= ~USING_MSI_FLAG;
6463
6464         } else
6465                 free_irq(bp->pdev->irq, bp->dev);
6466 }
6467
6468 static int bnx2x_enable_msix(struct bnx2x *bp)
6469 {
6470         int i, rc, offset = 1;
6471         int igu_vec = 0;
6472
6473         bp->msix_table[0].entry = igu_vec;
6474         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6475
6476         for_each_queue(bp, i) {
6477                 igu_vec = BP_L_ID(bp) + offset + i;
6478                 bp->msix_table[i + offset].entry = igu_vec;
6479                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6480                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6481         }
6482
6483         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6484                              BNX2X_NUM_QUEUES(bp) + offset);
6485         if (rc) {
6486                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6487                 return rc;
6488         }
6489
6490         bp->flags |= USING_MSIX_FLAG;
6491
6492         return 0;
6493 }
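
/* Resulting MSI-X table for a hypothetical 4-queue function with
 * BP_L_ID(bp) == 0:
 *
 *      msix_table[0].entry = 0         slowpath (default SB)
 *      msix_table[1].entry = 1         fastpath queue 0
 *      ...
 *      msix_table[4].entry = 4         fastpath queue 3
 *
 * so pci_enable_msix() is asked for BNX2X_NUM_QUEUES(bp) + 1 vectors.
 */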
6494
6495 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6496 {
6497         int i, rc, offset = 1;
6498
6499         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6500                          bp->dev->name, bp->dev);
6501         if (rc) {
6502                 BNX2X_ERR("request sp irq failed\n");
6503                 return -EBUSY;
6504         }
6505
6506         for_each_queue(bp, i) {
6507                 struct bnx2x_fastpath *fp = &bp->fp[i];
6508
6509                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6510                 rc = request_irq(bp->msix_table[i + offset].vector,
6511                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6512                 if (rc) {
6513                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6514                         bnx2x_free_msix_irqs(bp);
6515                         return -EBUSY;
6516                 }
6517
6518                 fp->state = BNX2X_FP_STATE_IRQ;
6519         }
6520
6521         i = BNX2X_NUM_QUEUES(bp);
6522         if (is_multi(bp))
6523                 printk(KERN_INFO PFX
6524                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6525                        bp->dev->name, bp->msix_table[0].vector,
6526                        bp->msix_table[offset].vector,
6527                        bp->msix_table[offset + i - 1].vector);
6528         else
6529                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6530                        bp->dev->name, bp->msix_table[0].vector,
6531                        bp->msix_table[offset + i - 1].vector);
6532
6533         return 0;
6534 }
6535
6536 static int bnx2x_enable_msi(struct bnx2x *bp)
6537 {
6538         int rc;
6539
6540         rc = pci_enable_msi(bp->pdev);
6541         if (rc) {
6542                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6543                 return -1;
6544         }
6545         bp->flags |= USING_MSI_FLAG;
6546
6547         return 0;
6548 }
6549
6550 static int bnx2x_req_irq(struct bnx2x *bp)
6551 {
6552         unsigned long flags;
6553         int rc;
6554
6555         if (bp->flags & USING_MSI_FLAG)
6556                 flags = 0;
6557         else
6558                 flags = IRQF_SHARED;
6559
6560         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6561                          bp->dev->name, bp->dev);
6562         if (!rc)
6563                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6564
6565         return rc;
6566 }
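
/* MSI vectors are per device and never shared, so the handler is
 * requested with flags 0 in that case; IRQF_SHARED is only needed for
 * legacy INTx, where the line may be shared with other devices.
 */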
6567
6568 static void bnx2x_napi_enable(struct bnx2x *bp)
6569 {
6570         int i;
6571
6572         for_each_rx_queue(bp, i)
6573                 napi_enable(&bnx2x_fp(bp, i, napi));
6574 }
6575
6576 static void bnx2x_napi_disable(struct bnx2x *bp)
6577 {
6578         int i;
6579
6580         for_each_rx_queue(bp, i)
6581                 napi_disable(&bnx2x_fp(bp, i, napi));
6582 }
6583
6584 static void bnx2x_netif_start(struct bnx2x *bp)
6585 {
6586         if (atomic_dec_and_test(&bp->intr_sem)) {
6587                 if (netif_running(bp->dev)) {
6588                         bnx2x_napi_enable(bp);
6589                         bnx2x_int_enable(bp);
6590                         if (bp->state == BNX2X_STATE_OPEN)
6591                                 netif_tx_wake_all_queues(bp->dev);
6592                 }
6593         }
6594 }
6595
6596 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6597 {
6598         bnx2x_int_disable_sync(bp, disable_hw);
6599         bnx2x_napi_disable(bp);
6600         netif_tx_disable(bp->dev);
6601         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6602 }
6603
6604 /*
6605  * Init service functions
6606  */
6607
6608 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6609 {
6610         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6611         int port = BP_PORT(bp);
6612
6613         /* CAM allocation
6614          * unicasts 0-31:port0 32-63:port1
6615          * multicast 64-127:port0 128-191:port1
6616          */
6617         config->hdr.length = 2;
6618         config->hdr.offset = port ? 32 : 0;
6619         config->hdr.client_id = bp->fp->cl_id;
6620         config->hdr.reserved1 = 0;
6621
6622         /* primary MAC */
6623         config->config_table[0].cam_entry.msb_mac_addr =
6624                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6625         config->config_table[0].cam_entry.middle_mac_addr =
6626                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6627         config->config_table[0].cam_entry.lsb_mac_addr =
6628                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6629         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6630         if (set)
6631                 config->config_table[0].target_table_entry.flags = 0;
6632         else
6633                 CAM_INVALIDATE(config->config_table[0]);
6634         config->config_table[0].target_table_entry.client_id = 0;
6635         config->config_table[0].target_table_entry.vlan_id = 0;
6636
6637         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6638            (set ? "setting" : "clearing"),
6639            config->config_table[0].cam_entry.msb_mac_addr,
6640            config->config_table[0].cam_entry.middle_mac_addr,
6641            config->config_table[0].cam_entry.lsb_mac_addr);
6642
6643         /* broadcast */
6644         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6645         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6646         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6647         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6648         if (set)
6649                 config->config_table[1].target_table_entry.flags =
6650                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6651         else
6652                 CAM_INVALIDATE(config->config_table[1]);
6653         config->config_table[1].target_table_entry.client_id = 0;
6654         config->config_table[1].target_table_entry.vlan_id = 0;
6655
6656         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6657                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6658                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6659 }
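
/* Worked example of the CAM packing above, on a little-endian host:
 * for a hypothetical dev_addr of 00:11:22:33:44:55, the 16-bit load of
 * bytes {0x00, 0x11} yields 0x1100 and swab16() makes that
 * msb_mac_addr = 0x0011; likewise middle = 0x2233 and lsb = 0x4455, so
 * the three CAM fields hold the address in network byte order.
 */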
6660
6661 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6662 {
6663         struct mac_configuration_cmd_e1h *config =
6664                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6665
6666         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6667                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6668                 return;
6669         }
6670
6671         /* CAM allocation for E1H
6672          * unicasts: by func number
6673          * multicast: 20+FUNC*20, 20 each
6674          */
6675         config->hdr.length = 1;
6676         config->hdr.offset = BP_FUNC(bp);
6677         config->hdr.client_id = bp->fp->cl_id;
6678         config->hdr.reserved1 = 0;
6679
6680         /* primary MAC */
6681         config->config_table[0].msb_mac_addr =
6682                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6683         config->config_table[0].middle_mac_addr =
6684                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6685         config->config_table[0].lsb_mac_addr =
6686                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6687         config->config_table[0].client_id = BP_L_ID(bp);
6688         config->config_table[0].vlan_id = 0;
6689         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6690         if (set)
6691                 config->config_table[0].flags = BP_PORT(bp);
6692         else
6693                 config->config_table[0].flags =
6694                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6695
6696         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6697            (set ? "setting" : "clearing"),
6698            config->config_table[0].msb_mac_addr,
6699            config->config_table[0].middle_mac_addr,
6700            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6701
6702         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6703                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6704                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6705 }
6706
6707 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6708                              int *state_p, int poll)
6709 {
6710         /* can take a while if any port is running */
6711         int cnt = 5000;
6712
6713         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6714            poll ? "polling" : "waiting", state, idx);
6715
6716         might_sleep();
6717         while (cnt--) {
6718                 if (poll) {
6719                         bnx2x_rx_int(bp->fp, 10);
6720                         /* if the index is non-zero, the reply
6721                          * for some commands will arrive on the
6722                          * non-default queue
6723                          */
6724                         if (idx)
6725                                 bnx2x_rx_int(&bp->fp[idx], 10);
6726                 }
6727
6728                 mb(); /* state is changed by bnx2x_sp_event() */
6729                 if (*state_p == state) {
6730 #ifdef BNX2X_STOP_ON_ERROR
6731                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6732 #endif
6733                         return 0;
6734                 }
6735
6736                 msleep(1);
6737         }
6738
6739         /* timeout! */
6740         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6741                   poll ? "polling" : "waiting", state, idx);
6742 #ifdef BNX2X_STOP_ON_ERROR
6743         bnx2x_panic();
6744 #endif
6745
6746         return -EBUSY;
6747 }
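/* Ramrod completion pattern: commands posted via bnx2x_sp_post()
 * complete asynchronously through bnx2x_sp_event(), which updates
 * *state_p; bnx2x_wait_ramrod() polls that state in 1 ms steps for up
 * to ~5 seconds (cnt = 5000) and returns 0 on success or -EBUSY on
 * timeout. With poll set it also drives the RX rings itself, so
 * completions can arrive even after the IRQs have been released.
 * A minimal caller sketch (mirroring bnx2x_setup_multi() below):
 *
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
 *			       &(fp->state), 0);
 *	if (rc)
 *		return rc;
 */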
6748
6749 static int bnx2x_setup_leading(struct bnx2x *bp)
6750 {
6751         int rc;
6752
6753         /* reset IGU state */
6754         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6755
6756         /* SETUP ramrod */
6757         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6758
6759         /* Wait for completion */
6760         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6761
6762         return rc;
6763 }
6764
6765 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6766 {
6767         struct bnx2x_fastpath *fp = &bp->fp[index];
6768
6769         /* reset IGU state */
6770         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6771
6772         /* SETUP ramrod */
6773         fp->state = BNX2X_FP_STATE_OPENING;
6774         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6775                       fp->cl_id, 0);
6776
6777         /* Wait for completion */
6778         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6779                                  &(fp->state), 0);
6780 }
6781
6782 static int bnx2x_poll(struct napi_struct *napi, int budget);
6783
6784 static void bnx2x_set_int_mode(struct bnx2x *bp)
6785 {
6786         int num_queues;
6787
6788         switch (int_mode) {
6789         case INT_MODE_INTx:
6790         case INT_MODE_MSI:
6791                 num_queues = 1;
6792                 bp->num_rx_queues = num_queues;
6793                 bp->num_tx_queues = num_queues;
6794                 DP(NETIF_MSG_IFUP,
6795                    "set number of queues to %d\n", num_queues);
6796                 break;
6797
6798         case INT_MODE_MSIX:
6799         default:
6800                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6801                         num_queues = min_t(u32, num_online_cpus(),
6802                                            BNX2X_MAX_QUEUES(bp));
6803                 else
6804                         num_queues = 1;
6805                 bp->num_rx_queues = num_queues;
6806                 bp->num_tx_queues = num_queues;
6807                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6808                    "  number of tx queues to %d\n",
6809                    bp->num_rx_queues, bp->num_tx_queues);
6810                 /* try to enable MSI-X with the requested number
6811                  * of fp's; if that fails we only need one fp, so
6812                  * fall back to MSI or legacy INTx with a single fp
6813                  */
6814                 if (bnx2x_enable_msix(bp)) {
6815                         /* failed to enable MSI-X */
6816                         num_queues = 1;
6817                         bp->num_rx_queues = num_queues;
6818                         bp->num_tx_queues = num_queues;
6819                         if (bp->multi_mode)
6820                                 BNX2X_ERR("Multi requested but failed to "
6821                                           "enable MSI-X  set number of "
6822                                           "queues to %d\n", num_queues);
6823                 }
6824                 break;
6825         }
6826         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6827 }
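/* Queue-count selection, illustrated with a hypothetical machine:
 * with multi_mode == ETH_RSS_MODE_REGULAR, 16 online CPUs and
 * BNX2X_MAX_QUEUES(bp) == 8, MSI-X is requested for
 *
 *	num_queues = min_t(u32, 16, 8) = 8
 *
 * rx and tx queues. If the MSI-X vectors cannot be granted,
 * bnx2x_enable_msix() fails and the driver falls back to a single
 * fastpath served by MSI or legacy INTx.
 */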
6828
6829 static void bnx2x_set_rx_mode(struct net_device *dev);
6830
6831 /* must be called with rtnl_lock */
6832 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6833 {
6834         u32 load_code;
6835         int i, rc = 0;
6836 #ifdef BNX2X_STOP_ON_ERROR
6837         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6838         if (unlikely(bp->panic))
6839                 return -EPERM;
6840 #endif
6841
6842         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6843
6844         bnx2x_set_int_mode(bp);
6845
6846         if (bnx2x_alloc_mem(bp))
6847                 return -ENOMEM;
6848
6849         for_each_rx_queue(bp, i)
6850                 bnx2x_fp(bp, i, disable_tpa) =
6851                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6852
6853         for_each_rx_queue(bp, i)
6854                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6855                                bnx2x_poll, 128);
6856
6857 #ifdef BNX2X_STOP_ON_ERROR
6858         for_each_rx_queue(bp, i) {
6859                 struct bnx2x_fastpath *fp = &bp->fp[i];
6860
6861                 fp->poll_no_work = 0;
6862                 fp->poll_calls = 0;
6863                 fp->poll_max_calls = 0;
6864                 fp->poll_complete = 0;
6865                 fp->poll_exit = 0;
6866         }
6867 #endif
6868         bnx2x_napi_enable(bp);
6869
6870         if (bp->flags & USING_MSIX_FLAG) {
6871                 rc = bnx2x_req_msix_irqs(bp);
6872                 if (rc) {
6873                         pci_disable_msix(bp->pdev);
6874                         goto load_error1;
6875                 }
6876         } else {
6877                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6878                         bnx2x_enable_msi(bp);
6879                 bnx2x_ack_int(bp);
6880                 rc = bnx2x_req_irq(bp);
6881                 if (rc) {
6882                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6883                         if (bp->flags & USING_MSI_FLAG)
6884                                 pci_disable_msi(bp->pdev);
6885                         goto load_error1;
6886                 }
6887                 if (bp->flags & USING_MSI_FLAG) {
6888                         bp->dev->irq = bp->pdev->irq;
6889                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6890                                bp->dev->name, bp->pdev->irq);
6891                 }
6892         }
6893
6894         /* Send LOAD_REQUEST command to the MCP.
6895            The response indicates the type of LOAD command:
6896            if this is the first port to be initialized,
6897            common blocks should be initialized as well; otherwise not
6898         */
6899         if (!BP_NOMCP(bp)) {
6900                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6901                 if (!load_code) {
6902                         BNX2X_ERR("MCP response failure, aborting\n");
6903                         rc = -EBUSY;
6904                         goto load_error2;
6905                 }
6906                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6907                         rc = -EBUSY; /* other port in diagnostic mode */
6908                         goto load_error2;
6909                 }
6910
6911         } else {
6912                 int port = BP_PORT(bp);
6913
6914                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
6915                    load_count[0], load_count[1], load_count[2]);
6916                 load_count[0]++;
6917                 load_count[1 + port]++;
6918                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
6919                    load_count[0], load_count[1], load_count[2]);
6920                 if (load_count[0] == 1)
6921                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6922                 else if (load_count[1 + port] == 1)
6923                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6924                 else
6925                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6926         }
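        /* load_count bookkeeping (no-MCP mode only): slot 0 counts
         * every loaded function on the chip, slots 1 and 2 count
         * functions per port. Hypothetically, the first function up
         * sees load_count[0] == 1 and initializes the common blocks
         * (LOAD_COMMON); the first function on the other port sees
         * load_count[1 + port] == 1 (LOAD_PORT); every later function
         * gets LOAD_FUNCTION. bnx2x_nic_unload() decrements the same
         * slots to pick the matching unload scope.
         */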
6927
6928         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6929             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6930                 bp->port.pmf = 1;
6931         else
6932                 bp->port.pmf = 0;
6933         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6934
6935         /* Initialize HW */
6936         rc = bnx2x_init_hw(bp, load_code);
6937         if (rc) {
6938                 BNX2X_ERR("HW init failed, aborting\n");
6939                 goto load_error2;
6940         }
6941
6942         /* Setup NIC internals and enable interrupts */
6943         bnx2x_nic_init(bp, load_code);
6944
6945         /* Send LOAD_DONE command to MCP */
6946         if (!BP_NOMCP(bp)) {
6947                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6948                 if (!load_code) {
6949                         BNX2X_ERR("MCP response failure, aborting\n");
6950                         rc = -EBUSY;
6951                         goto load_error3;
6952                 }
6953         }
6954
6955         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6956
6957         rc = bnx2x_setup_leading(bp);
6958         if (rc) {
6959                 BNX2X_ERR("Setup leading failed!\n");
6960                 goto load_error3;
6961         }
6962
6963         if (CHIP_IS_E1H(bp))
6964                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6965                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6966                         bp->state = BNX2X_STATE_DISABLED;
6967                 }
6968
6969         if (bp->state == BNX2X_STATE_OPEN)
6970                 for_each_nondefault_queue(bp, i) {
6971                         rc = bnx2x_setup_multi(bp, i);
6972                         if (rc)
6973                                 goto load_error3;
6974                 }
6975
6976         if (CHIP_IS_E1(bp))
6977                 bnx2x_set_mac_addr_e1(bp, 1);
6978         else
6979                 bnx2x_set_mac_addr_e1h(bp, 1);
6980
6981         if (bp->port.pmf)
6982                 bnx2x_initial_phy_init(bp, load_mode);
6983
6984         /* Start fast path */
6985         switch (load_mode) {
6986         case LOAD_NORMAL:
6987                 /* Tx queues should only be re-enabled */
6988                 netif_tx_wake_all_queues(bp->dev);
6989                 /* Initialize the receive filter. */
6990                 bnx2x_set_rx_mode(bp->dev);
6991                 break;
6992
6993         case LOAD_OPEN:
6994                 netif_tx_start_all_queues(bp->dev);
6995                 /* Initialize the receive filter. */
6996                 bnx2x_set_rx_mode(bp->dev);
6997                 break;
6998
6999         case LOAD_DIAG:
7000                 /* Initialize the receive filter. */
7001                 bnx2x_set_rx_mode(bp->dev);
7002                 bp->state = BNX2X_STATE_DIAG;
7003                 break;
7004
7005         default:
7006                 break;
7007         }
7008
7009         if (!bp->port.pmf)
7010                 bnx2x__link_status_update(bp);
7011
7012         /* start the timer */
7013         mod_timer(&bp->timer, jiffies + bp->current_interval);
7014
7015
7016         return 0;
7017
7018 load_error3:
7019         bnx2x_int_disable_sync(bp, 1);
7020         if (!BP_NOMCP(bp)) {
7021                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7022                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7023         }
7024         bp->port.pmf = 0;
7025         /* Free SKBs, SGEs, TPA pool and driver internals */
7026         bnx2x_free_skbs(bp);
7027         for_each_rx_queue(bp, i)
7028                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7029 load_error2:
7030         /* Release IRQs */
7031         bnx2x_free_irq(bp);
7032 load_error1:
7033         bnx2x_napi_disable(bp);
7034         for_each_rx_queue(bp, i)
7035                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7036         bnx2x_free_mem(bp);
7037
7038         return rc;
7039 }
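/* The error unwind in bnx2x_nic_load() mirrors the init order in
 * reverse: load_error3 undoes HW/FW init (disables interrupts, sends
 * the MCP an unload sequence, frees SKBs and SGE ranges), load_error2
 * releases the IRQs requested earlier, and load_error1 tears down
 * NAPI and frees the memory from bnx2x_alloc_mem(). Jumping to the
 * right label is what keeps a mid-load failure from leaking
 * resources.
 */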
7040
7041 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7042 {
7043         struct bnx2x_fastpath *fp = &bp->fp[index];
7044         int rc;
7045
7046         /* halt the connection */
7047         fp->state = BNX2X_FP_STATE_HALTING;
7048         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7049
7050         /* Wait for completion */
7051         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7052                                &(fp->state), 1);
7053         if (rc) /* timeout */
7054                 return rc;
7055
7056         /* delete cfc entry */
7057         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7058
7059         /* Wait for completion */
7060         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7061                                &(fp->state), 1);
7062         return rc;
7063 }
7064
7065 static int bnx2x_stop_leading(struct bnx2x *bp)
7066 {
7067         __le16 dsb_sp_prod_idx;
7068         /* if the other port is handling traffic,
7069            this can take a lot of time */
7070         int cnt = 500;
7071         int rc;
7072
7073         might_sleep();
7074
7075         /* Send HALT ramrod */
7076         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7077         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7078
7079         /* Wait for completion */
7080         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7081                                &(bp->fp[0].state), 1);
7082         if (rc) /* timeout */
7083                 return rc;
7084
7085         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7086
7087         /* Send PORT_DELETE ramrod */
7088         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7089
7090         /* Wait for the completion to arrive on the default status
7091            block; we are going to reset the chip anyway,
7092            so there is not much to do if this times out
7093          */
7094         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7095                 if (!cnt) {
7096                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7097                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7098                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7099 #ifdef BNX2X_STOP_ON_ERROR
7100                         bnx2x_panic();
7101 #endif
7102                         rc = -EBUSY;
7103                         break;
7104                 }
7105                 cnt--;
7106                 msleep(1);
7107                 rmb(); /* Refresh the dsb_sp_prod */
7108         }
7109         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7110         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7111
7112         return rc;
7113 }
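/* Unlike the HALT ramrod, whose completion is observed as a state
 * change, the PORT_DEL ramrod completes on the default status block:
 * bnx2x_stop_leading() snapshots *bp->dsb_sp_prod before posting and
 * then polls for the producer to advance (cnt = 500 in 1 ms steps,
 * roughly half a second). A timeout here is tolerated because the
 * chip is about to be reset anyway.
 */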
7114
7115 static void bnx2x_reset_func(struct bnx2x *bp)
7116 {
7117         int port = BP_PORT(bp);
7118         int func = BP_FUNC(bp);
7119         int base, i;
7120
7121         /* Configure IGU */
7122         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7123         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7124
7125         /* Clear ILT */
7126         base = FUNC_ILT_BASE(func);
7127         for (i = base; i < base + ILT_PER_FUNC; i++)
7128                 bnx2x_ilt_wr(bp, i, 0);
7129 }
7130
7131 static void bnx2x_reset_port(struct bnx2x *bp)
7132 {
7133         int port = BP_PORT(bp);
7134         u32 val;
7135
7136         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7137
7138         /* Do not rcv packets to BRB */
7139         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7140         /* Do not direct rcv packets that are not for MCP to the BRB */
7141         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7142                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7143
7144         /* Configure AEU */
7145         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7146
7147         msleep(100);
7148         /* Check for BRB port occupancy */
7149         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7150         if (val)
7151                 DP(NETIF_MSG_IFDOWN,
7152                    "BRB1 is not empty  %d blocks are occupied\n", val);
7153
7154         /* TODO: Close Doorbell port? */
7155 }
7156
7157 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7158 {
7159         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7160            BP_FUNC(bp), reset_code);
7161
7162         switch (reset_code) {
7163         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7164                 bnx2x_reset_port(bp);
7165                 bnx2x_reset_func(bp);
7166                 bnx2x_reset_common(bp);
7167                 break;
7168
7169         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7170                 bnx2x_reset_port(bp);
7171                 bnx2x_reset_func(bp);
7172                 break;
7173
7174         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7175                 bnx2x_reset_func(bp);
7176                 break;
7177
7178         default:
7179                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7180                 break;
7181         }
7182 }
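/* Reset scope is strictly nested: UNLOAD_FUNCTION resets only this
 * function's state (IGU edge registers, ILT range), UNLOAD_PORT also
 * resets the port-level NIG/BRB/AEU state, and UNLOAD_COMMON resets
 * function, port and the shared common blocks. The MCP (or the no-MCP
 * load_count fallback) chooses the scope based on how many functions
 * are still loaded.
 */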
7183
7184 /* must be called with rtnl_lock */
7185 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7186 {
7187         int port = BP_PORT(bp);
7188         u32 reset_code = 0;
7189         int i, cnt, rc;
7190
7191         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7192
7193         bp->rx_mode = BNX2X_RX_MODE_NONE;
7194         bnx2x_set_storm_rx_mode(bp);
7195
7196         bnx2x_netif_stop(bp, 1);
7197
7198         del_timer_sync(&bp->timer);
7199         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7200                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7201         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7202
7203         /* Release IRQs */
7204         bnx2x_free_irq(bp);
7205
7206         /* Wait until tx fastpath tasks complete */
7207         for_each_tx_queue(bp, i) {
7208                 struct bnx2x_fastpath *fp = &bp->fp[i];
7209
7210                 cnt = 1000;
7211                 while (bnx2x_has_tx_work_unload(fp)) {
7212
7213                         bnx2x_tx_int(fp);
7214                         if (!cnt) {
7215                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7216                                           i);
7217 #ifdef BNX2X_STOP_ON_ERROR
7218                                 bnx2x_panic();
7219                                 return -EBUSY;
7220 #else
7221                                 break;
7222 #endif
7223                         }
7224                         cnt--;
7225                         msleep(1);
7226                 }
7227         }
7228         /* Give HW time to discard old tx messages */
7229         msleep(1);
7230
7231         if (CHIP_IS_E1(bp)) {
7232                 struct mac_configuration_cmd *config =
7233                                                 bnx2x_sp(bp, mcast_config);
7234
7235                 bnx2x_set_mac_addr_e1(bp, 0);
7236
7237                 for (i = 0; i < config->hdr.length; i++)
7238                         CAM_INVALIDATE(config->config_table[i]);
7239
7240                 config->hdr.length = i;
7241                 if (CHIP_REV_IS_SLOW(bp))
7242                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7243                 else
7244                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7245                 config->hdr.client_id = bp->fp->cl_id;
7246                 config->hdr.reserved1 = 0;
7247
7248                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7249                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7250                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7251
7252         } else { /* E1H */
7253                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7254
7255                 bnx2x_set_mac_addr_e1h(bp, 0);
7256
7257                 for (i = 0; i < MC_HASH_SIZE; i++)
7258                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7259         }
7260
7261         if (unload_mode == UNLOAD_NORMAL)
7262                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7263
7264         else if (bp->flags & NO_WOL_FLAG) {
7265                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7266                 if (CHIP_IS_E1H(bp))
7267                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7268
7269         } else if (bp->wol) {
7270                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7271                 u8 *mac_addr = bp->dev->dev_addr;
7272                 u32 val;
7273                 /* The MAC address is written to entries 1-4 to
7274                    preserve entry 0, which is used by the PMF */
7275                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7276
7277                 val = (mac_addr[0] << 8) | mac_addr[1];
7278                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7279
7280                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7281                       (mac_addr[4] << 8) | mac_addr[5];
7282                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
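                /* Each MAC match entry is a pair of 32-bit registers
                 * (8 bytes, written at +entry and +entry + 4 above),
                 * hence the (BP_E1HVN(bp) + 1)*8 offset: VNs 0-3 land
                 * in entries 1-4, keeping entry 0 for the PMF.
                 */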
7283
7284                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7285
7286         } else
7287                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7288
7289         /* Close multi and leading connections
7290            Completions for ramrods are collected in a synchronous way */
7291         for_each_nondefault_queue(bp, i)
7292                 if (bnx2x_stop_multi(bp, i))
7293                         goto unload_error;
7294
7295         rc = bnx2x_stop_leading(bp);
7296         if (rc) {
7297                 BNX2X_ERR("Stop leading failed!\n");
7298 #ifdef BNX2X_STOP_ON_ERROR
7299                 return -EBUSY;
7300 #else
7301                 goto unload_error;
7302 #endif
7303         }
7304
7305 unload_error:
7306         if (!BP_NOMCP(bp))
7307                 reset_code = bnx2x_fw_command(bp, reset_code);
7308         else {
7309                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7310                    load_count[0], load_count[1], load_count[2]);
7311                 load_count[0]--;
7312                 load_count[1 + port]--;
7313                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7314                    load_count[0], load_count[1], load_count[2]);
7315                 if (load_count[0] == 0)
7316                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7317                 else if (load_count[1 + port] == 0)
7318                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7319                 else
7320                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7321         }
7322
7323         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7324             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7325                 bnx2x__link_reset(bp);
7326
7327         /* Reset the chip */
7328         bnx2x_reset_chip(bp, reset_code);
7329
7330         /* Report UNLOAD_DONE to MCP */
7331         if (!BP_NOMCP(bp))
7332                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7333
7334         bp->port.pmf = 0;
7335
7336         /* Free SKBs, SGEs, TPA pool and driver internals */
7337         bnx2x_free_skbs(bp);
7338         for_each_rx_queue(bp, i)
7339                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7340         for_each_rx_queue(bp, i)
7341                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7342         bnx2x_free_mem(bp);
7343
7344         bp->state = BNX2X_STATE_CLOSED;
7345
7346         netif_carrier_off(bp->dev);
7347
7348         return 0;
7349 }
7350
7351 static void bnx2x_reset_task(struct work_struct *work)
7352 {
7353         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7354
7355 #ifdef BNX2X_STOP_ON_ERROR
7356         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
7357                   " so the reset is not done to allow a debug dump;\n"
7358                   " you will need to reboot when done\n");
7359         return;
7360 #endif
7361
7362         rtnl_lock();
7363
7364         if (!netif_running(bp->dev))
7365                 goto reset_task_exit;
7366
7367         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7368         bnx2x_nic_load(bp, LOAD_NORMAL);
7369
7370 reset_task_exit:
7371         rtnl_unlock();
7372 }
7373
7374 /* end of nic load/unload */
7375
7376 /* ethtool_ops */
7377
7378 /*
7379  * Init service functions
7380  */
7381
7382 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7383 {
7384         switch (func) {
7385         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7386         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7387         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7388         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7389         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7390         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7391         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7392         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7393         default:
7394                 BNX2X_ERR("Unsupported function index: %d\n", func);
7395                 return (u32)(-1);
7396         }
7397 }
7398
7399 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7400 {
7401         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7402
7403         /* Flush all outstanding writes */
7404         mmiowb();
7405
7406         /* Pretend to be function 0 */
7407         REG_WR(bp, reg, 0);
7408         /* Flush the GRC transaction (in the chip) */
7409         new_val = REG_RD(bp, reg);
7410         if (new_val != 0) {
7411                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7412                           new_val);
7413                 BUG();
7414         }
7415
7416         /* From now we are in the "like-E1" mode */
7417         bnx2x_int_disable(bp);
7418
7419         /* Flush all outstanding writes */
7420         mmiowb();
7421
7422         /* Restore the original function settings */
7423         REG_WR(bp, reg, orig_func);
7424         new_val = REG_RD(bp, reg);
7425         if (new_val != orig_func) {
7426                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7427                           orig_func, new_val);
7428                 BUG();
7429         }
7430 }
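/* The "pretend" mechanism, sketched: writing a function number into
 * the per-function PXP2_REG_PGL_PRETEND_FUNC_Fn register in effect
 * makes the chip treat subsequent GRC accesses from this driver as if
 * they came from that function. Here the driver pretends to be
 * function 0 just long enough to run the E1-style bnx2x_int_disable(),
 * then restores its own identity; the read-back after each write both
 * flushes the GRC transaction and verifies that the pretend took
 * effect (BUG() otherwise).
 */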
7431
7432 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7433 {
7434         if (CHIP_IS_E1H(bp))
7435                 bnx2x_undi_int_disable_e1h(bp, func);
7436         else
7437                 bnx2x_int_disable(bp);
7438 }
7439
7440 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7441 {
7442         u32 val;
7443
7444         /* Check if there is any driver already loaded */
7445         val = REG_RD(bp, MISC_REG_UNPREPARED);
7446         if (val == 0x1) {
7447                 /* Check if it is the UNDI driver: UNDI initializes
7448                  * the CID offset for the normal doorbell to 0x7
7449                  */
7450                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7451                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7452                 if (val == 0x7) {
7453                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7454                         /* save our func */
7455                         int func = BP_FUNC(bp);
7456                         u32 swap_en;
7457                         u32 swap_val;
7458
7459                         /* clear the UNDI indication */
7460                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7461
7462                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7463
7464                         /* try unload UNDI on port 0 */
7465                         bp->func = 0;
7466                         bp->fw_seq =
7467                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7468                                 DRV_MSG_SEQ_NUMBER_MASK);
7469                         reset_code = bnx2x_fw_command(bp, reset_code);
7470
7471                         /* if UNDI is loaded on the other port */
7472                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7473
7474                                 /* send "DONE" for previous unload */
7475                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7476
7477                                 /* unload UNDI on port 1 */
7478                                 bp->func = 1;
7479                                 bp->fw_seq =
7480                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7481                                         DRV_MSG_SEQ_NUMBER_MASK);
7482                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7483
7484                                 bnx2x_fw_command(bp, reset_code);
7485                         }
7486
7487                         /* now it's safe to release the lock */
7488                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7489
7490                         bnx2x_undi_int_disable(bp, func);
7491
7492                         /* close input traffic and wait for it */
7493                         /* Do not rcv packets to BRB */
7494                         REG_WR(bp,
7495                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7496                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7497                         /* Do not direct rcv packets that are not for MCP to
7498                          * the BRB */
7499                         REG_WR(bp,
7500                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7501                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7502                         /* clear AEU */
7503                         REG_WR(bp,
7504                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7505                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7506                         msleep(10);
7507
7508                         /* save NIG port swap info */
7509                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7510                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7511                         /* reset device */
7512                         REG_WR(bp,
7513                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7514                                0xd3ffffff);
7515                         REG_WR(bp,
7516                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7517                                0x1403);
7518                         /* take the NIG out of reset and restore swap values */
7519                         REG_WR(bp,
7520                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7521                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7522                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7523                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7524
7525                         /* send unload done to the MCP */
7526                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7527
7528                         /* restore our func and fw_seq */
7529                         bp->func = func;
7530                         bp->fw_seq =
7531                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7532                                 DRV_MSG_SEQ_NUMBER_MASK);
7533
7534                 } else
7535                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7536         }
7537 }
7538
7539 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7540 {
7541         u32 val, val2, val3, val4, id;
7542         u16 pmc;
7543
7544         /* Get the chip revision id and number. */
7545         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7546         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7547         id = ((val & 0xffff) << 16);
7548         val = REG_RD(bp, MISC_REG_CHIP_REV);
7549         id |= ((val & 0xf) << 12);
7550         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7551         id |= ((val & 0xff) << 4);
7552         val = REG_RD(bp, MISC_REG_BOND_ID);
7553         id |= (val & 0xf);
7554         bp->common.chip_id = id;
7555         bp->link_params.chip_id = bp->common.chip_id;
7556         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
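        /* Illustration with hypothetical register values: chip num
         * 0x164e, rev 0x0, metal 0x00 and bond_id 0x0 compose as
         *
         *	(0x164e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0
         *		= 0x164e0000
         *
         * which is the value predicates like CHIP_IS_E1H() later
         * examine.
         */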
7557
7558         val = (REG_RD(bp, 0x2874) & 0x55);
7559         if ((bp->common.chip_id & 0x1) ||
7560             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7561                 bp->flags |= ONE_PORT_FLAG;
7562                 BNX2X_DEV_INFO("single port device\n");
7563         }
7564
7565         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7566         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7567                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7568         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7569                        bp->common.flash_size, bp->common.flash_size);
7570
7571         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7572         bp->link_params.shmem_base = bp->common.shmem_base;
7573         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7574
7575         if (!bp->common.shmem_base ||
7576             (bp->common.shmem_base < 0xA0000) ||
7577             (bp->common.shmem_base >= 0xC0000)) {
7578                 BNX2X_DEV_INFO("MCP not active\n");
7579                 bp->flags |= NO_MCP_FLAG;
7580                 return;
7581         }
7582
7583         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7584         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7585                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7586                 BNX2X_ERR("BAD MCP validity signature\n");
7587
7588         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7589         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7590
7591         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7592                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7593                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7594
7595         bp->link_params.feature_config_flags = 0;
7596         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7597         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7598                 bp->link_params.feature_config_flags |=
7599                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7600         else
7601                 bp->link_params.feature_config_flags &=
7602                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7603
7604         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7605         bp->common.bc_ver = val;
7606         BNX2X_DEV_INFO("bc_ver %X\n", val);
7607         if (val < BNX2X_BC_VER) {
7608                 /* for now only warn;
7609                  * later we might need to enforce this */
7610                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7611                           " please upgrade BC\n", BNX2X_BC_VER, val);
7612         }
7613
7614         if (BP_E1HVN(bp) == 0) {
7615                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7616                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7617         } else {
7618                 /* no WOL capability for E1HVN != 0 */
7619                 bp->flags |= NO_WOL_FLAG;
7620         }
7621         BNX2X_DEV_INFO("%sWoL capable\n",
7622                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7623
7624         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7625         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7626         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7627         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7628
7629         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7630                val, val2, val3, val4);
7631 }
7632
7633 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7634                                                     u32 switch_cfg)
7635 {
7636         int port = BP_PORT(bp);
7637         u32 ext_phy_type;
7638
7639         switch (switch_cfg) {
7640         case SWITCH_CFG_1G:
7641                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7642
7643                 ext_phy_type =
7644                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7645                 switch (ext_phy_type) {
7646                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7647                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7648                                        ext_phy_type);
7649
7650                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7651                                                SUPPORTED_10baseT_Full |
7652                                                SUPPORTED_100baseT_Half |
7653                                                SUPPORTED_100baseT_Full |
7654                                                SUPPORTED_1000baseT_Full |
7655                                                SUPPORTED_2500baseX_Full |
7656                                                SUPPORTED_TP |
7657                                                SUPPORTED_FIBRE |
7658                                                SUPPORTED_Autoneg |
7659                                                SUPPORTED_Pause |
7660                                                SUPPORTED_Asym_Pause);
7661                         break;
7662
7663                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7664                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7665                                        ext_phy_type);
7666
7667                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7668                                                SUPPORTED_10baseT_Full |
7669                                                SUPPORTED_100baseT_Half |
7670                                                SUPPORTED_100baseT_Full |
7671                                                SUPPORTED_1000baseT_Full |
7672                                                SUPPORTED_TP |
7673                                                SUPPORTED_FIBRE |
7674                                                SUPPORTED_Autoneg |
7675                                                SUPPORTED_Pause |
7676                                                SUPPORTED_Asym_Pause);
7677                         break;
7678
7679                 default:
7680                         BNX2X_ERR("NVRAM config error. "
7681                                   "BAD SerDes ext_phy_config 0x%x\n",
7682                                   bp->link_params.ext_phy_config);
7683                         return;
7684                 }
7685
7686                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7687                                            port*0x10);
7688                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7689                 break;
7690
7691         case SWITCH_CFG_10G:
7692                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7693
7694                 ext_phy_type =
7695                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7696                 switch (ext_phy_type) {
7697                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7698                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7699                                        ext_phy_type);
7700
7701                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7702                                                SUPPORTED_10baseT_Full |
7703                                                SUPPORTED_100baseT_Half |
7704                                                SUPPORTED_100baseT_Full |
7705                                                SUPPORTED_1000baseT_Full |
7706                                                SUPPORTED_2500baseX_Full |
7707                                                SUPPORTED_10000baseT_Full |
7708                                                SUPPORTED_TP |
7709                                                SUPPORTED_FIBRE |
7710                                                SUPPORTED_Autoneg |
7711                                                SUPPORTED_Pause |
7712                                                SUPPORTED_Asym_Pause);
7713                         break;
7714
7715                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7716                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7717                                        ext_phy_type);
7718
7719                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7720                                                SUPPORTED_1000baseT_Full |
7721                                                SUPPORTED_FIBRE |
7722                                                SUPPORTED_Autoneg |
7723                                                SUPPORTED_Pause |
7724                                                SUPPORTED_Asym_Pause);
7725                         break;
7726
7727                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7728                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7729                                        ext_phy_type);
7730
7731                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7732                                                SUPPORTED_2500baseX_Full |
7733                                                SUPPORTED_1000baseT_Full |
7734                                                SUPPORTED_FIBRE |
7735                                                SUPPORTED_Autoneg |
7736                                                SUPPORTED_Pause |
7737                                                SUPPORTED_Asym_Pause);
7738                         break;
7739
7740                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7741                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7742                                        ext_phy_type);
7743
7744                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7745                                                SUPPORTED_FIBRE |
7746                                                SUPPORTED_Pause |
7747                                                SUPPORTED_Asym_Pause);
7748                         break;
7749
7750                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7751                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7752                                        ext_phy_type);
7753
7754                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7755                                                SUPPORTED_1000baseT_Full |
7756                                                SUPPORTED_FIBRE |
7757                                                SUPPORTED_Pause |
7758                                                SUPPORTED_Asym_Pause);
7759                         break;
7760
7761                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7762                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7763                                        ext_phy_type);
7764
7765                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7766                                                SUPPORTED_1000baseT_Full |
7767                                                SUPPORTED_Autoneg |
7768                                                SUPPORTED_FIBRE |
7769                                                SUPPORTED_Pause |
7770                                                SUPPORTED_Asym_Pause);
7771                         break;
7772
7773                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7774                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7775                                        ext_phy_type);
7776
7777                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7778                                                SUPPORTED_TP |
7779                                                SUPPORTED_Autoneg |
7780                                                SUPPORTED_Pause |
7781                                                SUPPORTED_Asym_Pause);
7782                         break;
7783
7784                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7785                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7786                                        ext_phy_type);
7787
7788                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7789                                                SUPPORTED_10baseT_Full |
7790                                                SUPPORTED_100baseT_Half |
7791                                                SUPPORTED_100baseT_Full |
7792                                                SUPPORTED_1000baseT_Full |
7793                                                SUPPORTED_10000baseT_Full |
7794                                                SUPPORTED_TP |
7795                                                SUPPORTED_Autoneg |
7796                                                SUPPORTED_Pause |
7797                                                SUPPORTED_Asym_Pause);
7798                         break;
7799
7800                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7801                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7802                                   bp->link_params.ext_phy_config);
7803                         break;
7804
7805                 default:
7806                         BNX2X_ERR("NVRAM config error. "
7807                                   "BAD XGXS ext_phy_config 0x%x\n",
7808                                   bp->link_params.ext_phy_config);
7809                         return;
7810                 }
7811
7812                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7813                                            port*0x18);
7814                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7815
7816                 break;
7817
7818         default:
7819                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7820                           bp->port.link_config);
7821                 return;
7822         }
7823         bp->link_params.phy_addr = bp->port.phy_addr;
7824
7825         /* mask what we support according to speed_cap_mask */
7826         if (!(bp->link_params.speed_cap_mask &
7827                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7828                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7829
7830         if (!(bp->link_params.speed_cap_mask &
7831                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7832                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7833
7834         if (!(bp->link_params.speed_cap_mask &
7835                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7836                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7837
7838         if (!(bp->link_params.speed_cap_mask &
7839                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7840                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7841
7842         if (!(bp->link_params.speed_cap_mask &
7843                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7844                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7845                                         SUPPORTED_1000baseT_Full);
7846
7847         if (!(bp->link_params.speed_cap_mask &
7848                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7849                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7850
7851         if (!(bp->link_params.speed_cap_mask &
7852                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7853                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7854
7855         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7856 }
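/* The speed_cap_mask pruning above makes bp->port.supported the
 * intersection of what the PHY can do and what the NVRAM permits.
 * Hypothetical example: a BCM8073 reports 1G/2.5G/10G, but if the
 * NVRAM mask lacks PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G then
 * SUPPORTED_2500baseX_Full is cleared and 2.5G is never advertised
 * on this port.
 */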
7857
7858 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7859 {
7860         bp->link_params.req_duplex = DUPLEX_FULL;
7861
7862         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7863         case PORT_FEATURE_LINK_SPEED_AUTO:
7864                 if (bp->port.supported & SUPPORTED_Autoneg) {
7865                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7866                         bp->port.advertising = bp->port.supported;
7867                 } else {
7868                         u32 ext_phy_type =
7869                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7870
7871                         if ((ext_phy_type ==
7872                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7873                             (ext_phy_type ==
7874                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7875                                 /* force 10G, no AN */
7876                                 bp->link_params.req_line_speed = SPEED_10000;
7877                                 bp->port.advertising =
7878                                                 (ADVERTISED_10000baseT_Full |
7879                                                  ADVERTISED_FIBRE);
7880                                 break;
7881                         }
7882                         BNX2X_ERR("NVRAM config error. "
7883                                   "Invalid link_config 0x%x"
7884                                   "  Autoneg not supported\n",
7885                                   bp->port.link_config);
7886                         return;
7887                 }
7888                 break;
7889
7890         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7891                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7892                         bp->link_params.req_line_speed = SPEED_10;
7893                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7894                                                 ADVERTISED_TP);
7895                 } else {
7896                         BNX2X_ERR("NVRAM config error. "
7897                                   "Invalid link_config 0x%x"
7898                                   "  speed_cap_mask 0x%x\n",
7899                                   bp->port.link_config,
7900                                   bp->link_params.speed_cap_mask);
7901                         return;
7902                 }
7903                 break;
7904
7905         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7906                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7907                         bp->link_params.req_line_speed = SPEED_10;
7908                         bp->link_params.req_duplex = DUPLEX_HALF;
7909                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7910                                                 ADVERTISED_TP);
7911                 } else {
7912                         BNX2X_ERR("NVRAM config error. "
7913                                   "Invalid link_config 0x%x"
7914                                   "  speed_cap_mask 0x%x\n",
7915                                   bp->port.link_config,
7916                                   bp->link_params.speed_cap_mask);
7917                         return;
7918                 }
7919                 break;
7920
7921         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7922                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7923                         bp->link_params.req_line_speed = SPEED_100;
7924                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7925                                                 ADVERTISED_TP);
7926                 } else {
7927                         BNX2X_ERR("NVRAM config error. "
7928                                   "Invalid link_config 0x%x"
7929                                   "  speed_cap_mask 0x%x\n",
7930                                   bp->port.link_config,
7931                                   bp->link_params.speed_cap_mask);
7932                         return;
7933                 }
7934                 break;
7935
7936         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7937                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7938                         bp->link_params.req_line_speed = SPEED_100;
7939                         bp->link_params.req_duplex = DUPLEX_HALF;
7940                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7941                                                 ADVERTISED_TP);
7942                 } else {
7943                         BNX2X_ERR("NVRAM config error. "
7944                                   "Invalid link_config 0x%x"
7945                                   "  speed_cap_mask 0x%x\n",
7946                                   bp->port.link_config,
7947                                   bp->link_params.speed_cap_mask);
7948                         return;
7949                 }
7950                 break;
7951
7952         case PORT_FEATURE_LINK_SPEED_1G:
7953                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7954                         bp->link_params.req_line_speed = SPEED_1000;
7955                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7956                                                 ADVERTISED_TP);
7957                 } else {
7958                         BNX2X_ERR("NVRAM config error. "
7959                                   "Invalid link_config 0x%x"
7960                                   "  speed_cap_mask 0x%x\n",
7961                                   bp->port.link_config,
7962                                   bp->link_params.speed_cap_mask);
7963                         return;
7964                 }
7965                 break;
7966
7967         case PORT_FEATURE_LINK_SPEED_2_5G:
7968                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7969                         bp->link_params.req_line_speed = SPEED_2500;
7970                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7971                                                 ADVERTISED_TP);
7972                 } else {
7973                         BNX2X_ERR("NVRAM config error. "
7974                                   "Invalid link_config 0x%x"
7975                                   "  speed_cap_mask 0x%x\n",
7976                                   bp->port.link_config,
7977                                   bp->link_params.speed_cap_mask);
7978                         return;
7979                 }
7980                 break;
7981
7982         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7983         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7984         case PORT_FEATURE_LINK_SPEED_10G_KR:
7985                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7986                         bp->link_params.req_line_speed = SPEED_10000;
7987                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7988                                                 ADVERTISED_FIBRE);
7989                 } else {
7990                         BNX2X_ERR("NVRAM config error. "
7991                                   "Invalid link_config 0x%x"
7992                                   "  speed_cap_mask 0x%x\n",
7993                                   bp->port.link_config,
7994                                   bp->link_params.speed_cap_mask);
7995                         return;
7996                 }
7997                 break;
7998
7999         default:
8000                 BNX2X_ERR("NVRAM config error. "
8001                           "BAD link speed link_config 0x%x\n",
8002                           bp->port.link_config);
8003                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8004                 bp->port.advertising = bp->port.supported;
8005                 break;
8006         }
8007
8008         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8009                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8010         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8011             !(bp->port.supported & SUPPORTED_Autoneg))
8012                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8013
8014         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8015                        "  advertising 0x%x\n",
8016                        bp->link_params.req_line_speed,
8017                        bp->link_params.req_duplex,
8018                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8019 }
8020
8021 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8022 {
8023         int port = BP_PORT(bp);
8024         u32 val, val2;
8025         u32 config;
8026         u16 i;
8027
8028         bp->link_params.bp = bp;
8029         bp->link_params.port = port;
8030
8031         bp->link_params.lane_config =
8032                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8033         bp->link_params.ext_phy_config =
8034                 SHMEM_RD(bp,
8035                          dev_info.port_hw_config[port].external_phy_config);
8036         bp->link_params.speed_cap_mask =
8037                 SHMEM_RD(bp,
8038                          dev_info.port_hw_config[port].speed_capability_mask);
8039
8040         bp->port.link_config =
8041                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8042
8043         /* Get the Rx and Tx XGXS config for all 4 lanes */
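             /* Each 32-bit shmem word packs two 16-bit lane settings: lane N
              * in the high half and lane N+1 in the low half, so e.g. a word
              * of 0x12345678 yields xgxs_config_rx[0] = 0x1234 and
              * xgxs_config_rx[1] = 0x5678.
              */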
8044         for (i = 0; i < 2; i++) {
8045                 val = SHMEM_RD(bp,
8046                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8047                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8048                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8049
8050                 val = SHMEM_RD(bp,
8051                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8052                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8053                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8054         }
8055
8056         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8057         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8058                 bp->link_params.feature_config_flags |=
8059                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8060         else
8061                 bp->link_params.feature_config_flags &=
8062                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8063
8064         /* If the device is capable of WoL, set the default state according
8065          * to the HW
8066          */
8067         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8068                    (config & PORT_FEATURE_WOL_ENABLED));
8069
8070         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8071                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8072                        bp->link_params.lane_config,
8073                        bp->link_params.ext_phy_config,
8074                        bp->link_params.speed_cap_mask, bp->port.link_config);
8075
8076         bp->link_params.switch_cfg = (bp->port.link_config &
8077                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8078         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8079
8080         bnx2x_link_settings_requested(bp);
8081
8082         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8083         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
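             /* mac_upper holds the two high bytes and mac_lower the four low
              * bytes of the MAC address, so e.g. val2 = 0x0000001a with
              * val = 0xc5000001 assembles to 00:1a:c5:00:00:01.
              */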
8084         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8085         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8086         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8087         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8088         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8089         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8090         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8091         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8092 }
8093
8094 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8095 {
8096         int func = BP_FUNC(bp);
8097         u32 val, val2;
8098         int rc = 0;
8099
8100         bnx2x_get_common_hwinfo(bp);
8101
8102         bp->e1hov = 0;
8103         bp->e1hmf = 0;
8104         if (CHIP_IS_E1H(bp)) {
8105                 bp->mf_config =
8106                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8107
8108                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8109                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8110                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8111
8112                         bp->e1hov = val;
8113                         bp->e1hmf = 1;
8114                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8115                                        "(0x%04x)\n",
8116                                        func, bp->e1hov, bp->e1hov);
8117                 } else {
8118                         BNX2X_DEV_INFO("single function mode\n");
8119                         if (BP_E1HVN(bp)) {
8120                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8121                                           "  aborting\n", func);
8122                                 rc = -EPERM;
8123                         }
8124                 }
8125         }
8126
8127         if (!BP_NOMCP(bp)) {
8128                 bnx2x_get_port_hwinfo(bp);
8129
8130                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8131                               DRV_MSG_SEQ_NUMBER_MASK);
8132                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8133         }
8134
8135         if (IS_E1HMF(bp)) {
8136                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8137                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8138                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8139                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8140                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8141                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8142                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8143                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8144                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8145                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8146                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8147                                ETH_ALEN);
8148                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8149                                ETH_ALEN);
8150                 }
8151
8152                 return rc;
8153         }
8154
8155         if (BP_NOMCP(bp)) {
8156                 /* only supposed to happen on emulation/FPGA */
8157                 BNX2X_ERR("warning: random MAC workaround active\n");
8158                 random_ether_addr(bp->dev->dev_addr);
8159                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8160         }
8161
8162         return rc;
8163 }
8164
8165 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8166 {
8167         int func = BP_FUNC(bp);
8168         int timer_interval;
8169         int rc;
8170
8171         /* Disable interrupt handling until HW is initialized */
8172         atomic_set(&bp->intr_sem, 1);
8173
8174         mutex_init(&bp->port.phy_mutex);
8175
8176         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8177         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8178
8179         rc = bnx2x_get_hwinfo(bp);
8180
8181         /* need to reset chip if undi was active */
8182         if (!BP_NOMCP(bp))
8183                 bnx2x_undi_unload(bp);
8184
8185         if (CHIP_REV_IS_FPGA(bp))
8186                 printk(KERN_ERR PFX "FPGA detected\n");
8187
8188         if (BP_NOMCP(bp) && (func == 0))
8189                 printk(KERN_ERR PFX
8190                        "MCP disabled, must load devices in order!\n");
8191
8192         /* Set multi queue mode */
8193         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8194             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8195                 printk(KERN_ERR PFX
8196                       "Multi disabled since int_mode requested is not MSI-X\n");
8197                 multi_mode = ETH_RSS_MODE_DISABLED;
8198         }
8199         bp->multi_mode = multi_mode;
8200
8201
8202         /* Set TPA flags */
8203         if (disable_tpa) {
8204                 bp->flags &= ~TPA_ENABLE_FLAG;
8205                 bp->dev->features &= ~NETIF_F_LRO;
8206         } else {
8207                 bp->flags |= TPA_ENABLE_FLAG;
8208                 bp->dev->features |= NETIF_F_LRO;
8209         }
8210
8211         bp->mrrs = mrrs;
8212
8213         bp->tx_ring_size = MAX_TX_AVAIL;
8214         bp->rx_ring_size = MAX_RX_AVAIL;
8215
8216         bp->rx_csum = 1;
8217
8218         bp->tx_ticks = 50;
8219         bp->rx_ticks = 25;
8220
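             /* If the poll module parameter is set, it is taken as the timer
              * interval in jiffies, turning the periodic timer into a debug
              * polling loop instead of the normal 1 s (or 5 s on slow
              * emulation/FPGA) heartbeat.
              */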
8221         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8222         bp->current_interval = (poll ? poll : timer_interval);
8223
8224         init_timer(&bp->timer);
8225         bp->timer.expires = jiffies + bp->current_interval;
8226         bp->timer.data = (unsigned long) bp;
8227         bp->timer.function = bnx2x_timer;
8228
8229         return rc;
8230 }
8231
8232 /*
8233  * ethtool service functions
8234  */
8235
8236 /* All ethtool functions called with rtnl_lock */
8237
8238 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8239 {
8240         struct bnx2x *bp = netdev_priv(dev);
8241
8242         cmd->supported = bp->port.supported;
8243         cmd->advertising = bp->port.advertising;
8244
8245         if (netif_carrier_ok(dev)) {
8246                 cmd->speed = bp->link_vars.line_speed;
8247                 cmd->duplex = bp->link_vars.duplex;
8248         } else {
8249                 cmd->speed = bp->link_params.req_line_speed;
8250                 cmd->duplex = bp->link_params.req_duplex;
8251         }
8252         if (IS_E1HMF(bp)) {
8253                 u16 vn_max_rate;
8254
8255                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8256                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8257                 if (vn_max_rate < cmd->speed)
8258                         cmd->speed = vn_max_rate;
8259         }
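                     /* The MAX_BW field counts in units of 100 Mbps, so e.g.
                      * a field value of 25 caps the reported speed at 2500.
                      */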
8260
8261         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8262                 u32 ext_phy_type =
8263                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8264
8265                 switch (ext_phy_type) {
8266                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8267                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8268                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8269                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8270                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8271                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8272                         cmd->port = PORT_FIBRE;
8273                         break;
8274
8275                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8276                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8277                         cmd->port = PORT_TP;
8278                         break;
8279
8280                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8281                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8282                                   bp->link_params.ext_phy_config);
8283                         break;
8284
8285                 default:
8286                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8287                            bp->link_params.ext_phy_config);
8288                         break;
8289                 }
8290         } else
8291                 cmd->port = PORT_TP;
8292
8293         cmd->phy_address = bp->port.phy_addr;
8294         cmd->transceiver = XCVR_INTERNAL;
8295
8296         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8297                 cmd->autoneg = AUTONEG_ENABLE;
8298         else
8299                 cmd->autoneg = AUTONEG_DISABLE;
8300
8301         cmd->maxtxpkt = 0;
8302         cmd->maxrxpkt = 0;
8303
8304         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8305            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8306            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8307            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8308            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8309            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8310            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8311
8312         return 0;
8313 }
8314
8315 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8316 {
8317         struct bnx2x *bp = netdev_priv(dev);
8318         u32 advertising;
8319
8320         if (IS_E1HMF(bp))
8321                 return 0;
8322
8323         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8324            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8325            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8326            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8327            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8328            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8329            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8330
8331         if (cmd->autoneg == AUTONEG_ENABLE) {
8332                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8333                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8334                         return -EINVAL;
8335                 }
8336
8337                 /* advertise the requested speed and duplex if supported */
8338                 cmd->advertising &= bp->port.supported;
8339
8340                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8341                 bp->link_params.req_duplex = DUPLEX_FULL;
8342                 bp->port.advertising |= (ADVERTISED_Autoneg |
8343                                          cmd->advertising);
8344
8345         } else { /* forced speed */
8346                 /* advertise the requested speed and duplex if supported */
8347                 switch (cmd->speed) {
8348                 case SPEED_10:
8349                         if (cmd->duplex == DUPLEX_FULL) {
8350                                 if (!(bp->port.supported &
8351                                       SUPPORTED_10baseT_Full)) {
8352                                         DP(NETIF_MSG_LINK,
8353                                            "10M full not supported\n");
8354                                         return -EINVAL;
8355                                 }
8356
8357                                 advertising = (ADVERTISED_10baseT_Full |
8358                                                ADVERTISED_TP);
8359                         } else {
8360                                 if (!(bp->port.supported &
8361                                       SUPPORTED_10baseT_Half)) {
8362                                         DP(NETIF_MSG_LINK,
8363                                            "10M half not supported\n");
8364                                         return -EINVAL;
8365                                 }
8366
8367                                 advertising = (ADVERTISED_10baseT_Half |
8368                                                ADVERTISED_TP);
8369                         }
8370                         break;
8371
8372                 case SPEED_100:
8373                         if (cmd->duplex == DUPLEX_FULL) {
8374                                 if (!(bp->port.supported &
8375                                                 SUPPORTED_100baseT_Full)) {
8376                                         DP(NETIF_MSG_LINK,
8377                                            "100M full not supported\n");
8378                                         return -EINVAL;
8379                                 }
8380
8381                                 advertising = (ADVERTISED_100baseT_Full |
8382                                                ADVERTISED_TP);
8383                         } else {
8384                                 if (!(bp->port.supported &
8385                                                 SUPPORTED_100baseT_Half)) {
8386                                         DP(NETIF_MSG_LINK,
8387                                            "100M half not supported\n");
8388                                         return -EINVAL;
8389                                 }
8390
8391                                 advertising = (ADVERTISED_100baseT_Half |
8392                                                ADVERTISED_TP);
8393                         }
8394                         break;
8395
8396                 case SPEED_1000:
8397                         if (cmd->duplex != DUPLEX_FULL) {
8398                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8399                                 return -EINVAL;
8400                         }
8401
8402                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8403                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8404                                 return -EINVAL;
8405                         }
8406
8407                         advertising = (ADVERTISED_1000baseT_Full |
8408                                        ADVERTISED_TP);
8409                         break;
8410
8411                 case SPEED_2500:
8412                         if (cmd->duplex != DUPLEX_FULL) {
8413                                 DP(NETIF_MSG_LINK,
8414                                    "2.5G half not supported\n");
8415                                 return -EINVAL;
8416                         }
8417
8418                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8419                                 DP(NETIF_MSG_LINK,
8420                                    "2.5G full not supported\n");
8421                                 return -EINVAL;
8422                         }
8423
8424                         advertising = (ADVERTISED_2500baseX_Full |
8425                                        ADVERTISED_TP);
8426                         break;
8427
8428                 case SPEED_10000:
8429                         if (cmd->duplex != DUPLEX_FULL) {
8430                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8431                                 return -EINVAL;
8432                         }
8433
8434                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8435                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8436                                 return -EINVAL;
8437                         }
8438
8439                         advertising = (ADVERTISED_10000baseT_Full |
8440                                        ADVERTISED_FIBRE);
8441                         break;
8442
8443                 default:
8444                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8445                         return -EINVAL;
8446                 }
8447
8448                 bp->link_params.req_line_speed = cmd->speed;
8449                 bp->link_params.req_duplex = cmd->duplex;
8450                 bp->port.advertising = advertising;
8451         }
8452
8453         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8454            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8455            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8456            bp->port.advertising);
8457
8458         if (netif_running(dev)) {
8459                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8460                 bnx2x_link_set(bp);
8461         }
8462
8463         return 0;
8464 }
8465
8466 #define PHY_FW_VER_LEN                  10
8467
8468 static void bnx2x_get_drvinfo(struct net_device *dev,
8469                               struct ethtool_drvinfo *info)
8470 {
8471         struct bnx2x *bp = netdev_priv(dev);
8472         u8 phy_fw_ver[PHY_FW_VER_LEN];
8473
8474         strcpy(info->driver, DRV_MODULE_NAME);
8475         strcpy(info->version, DRV_MODULE_VERSION);
8476
8477         phy_fw_ver[0] = '\0';
8478         if (bp->port.pmf) {
8479                 bnx2x_acquire_phy_lock(bp);
8480                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8481                                              (bp->state != BNX2X_STATE_CLOSED),
8482                                              phy_fw_ver, PHY_FW_VER_LEN);
8483                 bnx2x_release_phy_lock(bp);
8484         }
8485
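             /* bc_ver packs major.minor.rev one byte each, so e.g. a bc_ver
              * of 0x040200 is reported as "BC:4.2.0".
              */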
8486         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8487                  (bp->common.bc_ver & 0xff0000) >> 16,
8488                  (bp->common.bc_ver & 0xff00) >> 8,
8489                  (bp->common.bc_ver & 0xff),
8490                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8491         strcpy(info->bus_info, pci_name(bp->pdev));
8492         info->n_stats = BNX2X_NUM_STATS;
8493         info->testinfo_len = BNX2X_NUM_TESTS;
8494         info->eedump_len = bp->common.flash_size;
8495         info->regdump_len = 0;
8496 }
8497
8498 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8499 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8500
8501 static int bnx2x_get_regs_len(struct net_device *dev)
8502 {
8503         u32 regdump_len = 0;
8504         struct bnx2x *bp = netdev_priv(dev);
8505         int i;
8506
8510         if (CHIP_IS_E1(bp)) {
8511                 for (i = 0; i < REGS_COUNT; i++)
8512                         if (IS_E1_ONLINE(reg_addrs[i].info))
8513                                 regdump_len += reg_addrs[i].size;
8514
8515                 for (i = 0; i < WREGS_COUNT_E1; i++)
8516                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8517                                 regdump_len += wreg_addrs_e1[i].size *
8518                                         (1 + wreg_addrs_e1[i].read_regs_count);
8519
8520         } else { /* E1H */
8521                 for (i = 0; i < REGS_COUNT; i++)
8522                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8523                                 regdump_len += reg_addrs[i].size;
8524
8525                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8526                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8527                                 regdump_len += wreg_addrs_e1h[i].size *
8528                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8529         }
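             /* The sizes summed above count 32-bit registers, hence the *4 to
              * get bytes; the dump header that bnx2x_get_regs() prepends is
              * then added on top.
              */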
8530         regdump_len *= 4;
8531         regdump_len += sizeof(struct dump_hdr);
8532
8533         return regdump_len;
8534 }
8535
8536 static void bnx2x_get_regs(struct net_device *dev,
8537                            struct ethtool_regs *regs, void *_p)
8538 {
8539         u32 *p = _p, i, j;
8540         struct bnx2x *bp = netdev_priv(dev);
8541         struct dump_hdr dump_hdr = {0};
8542
8543         regs->version = 0;
8544         memset(p, 0, regs->len);
8545
8546         if (!netif_running(bp->dev))
8547                 return;
8548
8549         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8550         dump_hdr.dump_sign = dump_sign_all;
8551         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8552         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8553         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8554         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8555         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8556
8557         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8558         p += dump_hdr.hdr_size + 1;
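             /* hdr_size counts dwords excluding the first one, so advancing
              * the u32 pointer by hdr_size + 1 skips exactly the header just
              * written.
              */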
8559
8560         if (CHIP_IS_E1(bp)) {
8561                 for (i = 0; i < REGS_COUNT; i++)
8562                         if (IS_E1_ONLINE(reg_addrs[i].info))
8563                                 for (j = 0; j < reg_addrs[i].size; j++)
8564                                         *p++ = REG_RD(bp,
8565                                                       reg_addrs[i].addr + j*4);
8566
8567         } else { /* E1H */
8568                 for (i = 0; i < REGS_COUNT; i++)
8569                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8570                                 for (j = 0; j < reg_addrs[i].size; j++)
8571                                         *p++ = REG_RD(bp,
8572                                                       reg_addrs[i].addr + j*4);
8573         }
8574 }
8575
8576 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8577 {
8578         struct bnx2x *bp = netdev_priv(dev);
8579
8580         if (bp->flags & NO_WOL_FLAG) {
8581                 wol->supported = 0;
8582                 wol->wolopts = 0;
8583         } else {
8584                 wol->supported = WAKE_MAGIC;
8585                 if (bp->wol)
8586                         wol->wolopts = WAKE_MAGIC;
8587                 else
8588                         wol->wolopts = 0;
8589         }
8590         memset(&wol->sopass, 0, sizeof(wol->sopass));
8591 }
8592
8593 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8594 {
8595         struct bnx2x *bp = netdev_priv(dev);
8596
8597         if (wol->wolopts & ~WAKE_MAGIC)
8598                 return -EINVAL;
8599
8600         if (wol->wolopts & WAKE_MAGIC) {
8601                 if (bp->flags & NO_WOL_FLAG)
8602                         return -EINVAL;
8603
8604                 bp->wol = 1;
8605         } else
8606                 bp->wol = 0;
8607
8608         return 0;
8609 }
8610
8611 static u32 bnx2x_get_msglevel(struct net_device *dev)
8612 {
8613         struct bnx2x *bp = netdev_priv(dev);
8614
8615         return bp->msglevel;
8616 }
8617
8618 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8619 {
8620         struct bnx2x *bp = netdev_priv(dev);
8621
8622         if (capable(CAP_NET_ADMIN))
8623                 bp->msglevel = level;
8624 }
8625
8626 static int bnx2x_nway_reset(struct net_device *dev)
8627 {
8628         struct bnx2x *bp = netdev_priv(dev);
8629
8630         if (!bp->port.pmf)
8631                 return 0;
8632
8633         if (netif_running(dev)) {
8634                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8635                 bnx2x_link_set(bp);
8636         }
8637
8638         return 0;
8639 }
8640
8641 static u32 bnx2x_get_link(struct net_device *dev)
8643 {
8644         struct bnx2x *bp = netdev_priv(dev);
8645
8646         return bp->link_vars.link_up;
8647 }
8648
8649 static int bnx2x_get_eeprom_len(struct net_device *dev)
8650 {
8651         struct bnx2x *bp = netdev_priv(dev);
8652
8653         return bp->common.flash_size;
8654 }
8655
8656 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8657 {
8658         int port = BP_PORT(bp);
8659         int count, i;
8660         u32 val = 0;
8661
8662         /* adjust timeout for emulation/FPGA */
8663         count = NVRAM_TIMEOUT_COUNT;
8664         if (CHIP_REV_IS_SLOW(bp))
8665                 count *= 100;
8666
8667         /* request access to nvram interface */
8668         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8669                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8670
8671         for (i = 0; i < count*10; i++) {
8672                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8673                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8674                         break;
8675
8676                 udelay(5);
8677         }
8678
8679         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8680                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8681                 return -EBUSY;
8682         }
8683
8684         return 0;
8685 }
8686
8687 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8688 {
8689         int port = BP_PORT(bp);
8690         int count, i;
8691         u32 val = 0;
8692
8693         /* adjust timeout for emulation/FPGA */
8694         count = NVRAM_TIMEOUT_COUNT;
8695         if (CHIP_REV_IS_SLOW(bp))
8696                 count *= 100;
8697
8698         /* relinquish nvram interface */
8699         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8700                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8701
8702         for (i = 0; i < count*10; i++) {
8703                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8704                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8705                         break;
8706
8707                 udelay(5);
8708         }
8709
8710         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8711                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8712                 return -EBUSY;
8713         }
8714
8715         return 0;
8716 }
8717
8718 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8719 {
8720         u32 val;
8721
8722         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8723
8724         /* enable both bits, even on read */
8725         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8726                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8727                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8728 }
8729
8730 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8731 {
8732         u32 val;
8733
8734         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8735
8736         /* disable both bits, even after read */
8737         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8738                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8739                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8740 }
8741
8742 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8743                                   u32 cmd_flags)
8744 {
8745         int count, i, rc;
8746         u32 val;
8747
8748         /* build the command word */
8749         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8750
8751         /* need to clear DONE bit separately */
8752         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8753
8754         /* address of the NVRAM to read from */
8755         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8756                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8757
8758         /* issue a read command */
8759         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8760
8761         /* adjust timeout for emulation/FPGA */
8762         count = NVRAM_TIMEOUT_COUNT;
8763         if (CHIP_REV_IS_SLOW(bp))
8764                 count *= 100;
8765
8766         /* wait for completion */
8767         *ret_val = 0;
8768         rc = -EBUSY;
8769         for (i = 0; i < count; i++) {
8770                 udelay(5);
8771                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8772
8773                 if (val & MCPR_NVM_COMMAND_DONE) {
8774                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8775                         /* we read NVRAM data in CPU order, but ethtool
8776                          * expects an array of bytes; converting to big-
8777                          * endian keeps the flash byte order on any host */
8778                         *ret_val = cpu_to_be32(val);
8779                         rc = 0;
8780                         break;
8781                 }
8782         }
8783
8784         return rc;
8785 }
8786
8787 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8788                             int buf_size)
8789 {
8790         int rc;
8791         u32 cmd_flags;
8792         __be32 val;
8793
8794         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8795                 DP(BNX2X_MSG_NVM,
8796                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8797                    offset, buf_size);
8798                 return -EINVAL;
8799         }
8800
8801         if (offset + buf_size > bp->common.flash_size) {
8802                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8803                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8804                    offset, buf_size, bp->common.flash_size);
8805                 return -EINVAL;
8806         }
8807
8808         /* request access to nvram interface */
8809         rc = bnx2x_acquire_nvram_lock(bp);
8810         if (rc)
8811                 return rc;
8812
8813         /* enable access to nvram interface */
8814         bnx2x_enable_nvram_access(bp);
8815
8816         /* read the first word(s) */
8817         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8818         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8819                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8820                 memcpy(ret_buf, &val, 4);
8821
8822                 /* advance to the next dword */
8823                 offset += sizeof(u32);
8824                 ret_buf += sizeof(u32);
8825                 buf_size -= sizeof(u32);
8826                 cmd_flags = 0;
8827         }
8828
8829         if (rc == 0) {
8830                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8831                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8832                 memcpy(ret_buf, &val, 4);
8833         }
8834
8835         /* disable access to nvram interface */
8836         bnx2x_disable_nvram_access(bp);
8837         bnx2x_release_nvram_lock(bp);
8838
8839         return rc;
8840 }
8841
8842 static int bnx2x_get_eeprom(struct net_device *dev,
8843                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8844 {
8845         struct bnx2x *bp = netdev_priv(dev);
8846         int rc;
8847
8848         if (!netif_running(dev))
8849                 return -EAGAIN;
8850
8851         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8852            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8853            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8854            eeprom->len, eeprom->len);
8855
8856         /* parameters already validated in ethtool_get_eeprom */
8857
8858         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8859
8860         return rc;
8861 }
8862
8863 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8864                                    u32 cmd_flags)
8865 {
8866         int count, i, rc;
8867
8868         /* build the command word */
8869         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8870
8871         /* need to clear DONE bit separately */
8872         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8873
8874         /* write the data */
8875         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8876
8877         /* address of the NVRAM to write to */
8878         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8879                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8880
8881         /* issue the write command */
8882         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8883
8884         /* adjust timeout for emulation/FPGA */
8885         count = NVRAM_TIMEOUT_COUNT;
8886         if (CHIP_REV_IS_SLOW(bp))
8887                 count *= 100;
8888
8889         /* wait for completion */
8890         rc = -EBUSY;
8891         for (i = 0; i < count; i++) {
8892                 udelay(5);
8893                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8894                 if (val & MCPR_NVM_COMMAND_DONE) {
8895                         rc = 0;
8896                         break;
8897                 }
8898         }
8899
8900         return rc;
8901 }
8902
8903 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
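     /* e.g. BYTE_OFFSET(6) = 8 * (6 & 0x03) = 16; bnx2x_nvram_write1() below
      * uses this to patch the matching byte inside the aligned dword (on a
      * little-endian host, where byte k of the big-endian read buffer sits in
      * bits 8k+7:8k of the 32-bit value).
      */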
8904
8905 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8906                               int buf_size)
8907 {
8908         int rc;
8909         u32 cmd_flags;
8910         u32 align_offset;
8911         __be32 val;
8912
8913         if (offset + buf_size > bp->common.flash_size) {
8914                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8915                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8916                    offset, buf_size, bp->common.flash_size);
8917                 return -EINVAL;
8918         }
8919
8920         /* request access to nvram interface */
8921         rc = bnx2x_acquire_nvram_lock(bp);
8922         if (rc)
8923                 return rc;
8924
8925         /* enable access to nvram interface */
8926         bnx2x_enable_nvram_access(bp);
8927
8928         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8929         align_offset = (offset & ~0x03);
8930         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8931
8932         if (rc == 0) {
8933                 val &= ~(0xff << BYTE_OFFSET(offset));
8934                 val |= (*data_buf << BYTE_OFFSET(offset));
8935
8936                 /* NVRAM data is returned as a big-endian byte array;
8937                  * convert it back to CPU order before writing */
8938                 val = be32_to_cpu(val);
8939
8940                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8941                                              cmd_flags);
8942         }
8943
8944         /* disable access to nvram interface */
8945         bnx2x_disable_nvram_access(bp);
8946         bnx2x_release_nvram_lock(bp);
8947
8948         return rc;
8949 }
8950
8951 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8952                              int buf_size)
8953 {
8954         int rc;
8955         u32 cmd_flags;
8956         u32 val;
8957         u32 written_so_far;
8958
8959         if (buf_size == 1)      /* ethtool */
8960                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8961
8962         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8963                 DP(BNX2X_MSG_NVM,
8964                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8965                    offset, buf_size);
8966                 return -EINVAL;
8967         }
8968
8969         if (offset + buf_size > bp->common.flash_size) {
8970                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8971                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8972                    offset, buf_size, bp->common.flash_size);
8973                 return -EINVAL;
8974         }
8975
8976         /* request access to nvram interface */
8977         rc = bnx2x_acquire_nvram_lock(bp);
8978         if (rc)
8979                 return rc;
8980
8981         /* enable access to nvram interface */
8982         bnx2x_enable_nvram_access(bp);
8983
8984         written_so_far = 0;
8985         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8986         while ((written_so_far < buf_size) && (rc == 0)) {
8987                 if (written_so_far == (buf_size - sizeof(u32)))
8988                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8989                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8990                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8991                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8992                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
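                     /* e.g. assuming a 256-byte NVRAM page, the dword at
                      * offset 252 gets LAST (it ends the page) and the dword
                      * at offset 256 gets FIRST (it starts the next one).
                      */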
8993
8994                 memcpy(&val, data_buf, 4);
8995
8996                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8997
8998                 /* advance to the next dword */
8999                 offset += sizeof(u32);
9000                 data_buf += sizeof(u32);
9001                 written_so_far += sizeof(u32);
9002                 cmd_flags = 0;
9003         }
9004
9005         /* disable access to nvram interface */
9006         bnx2x_disable_nvram_access(bp);
9007         bnx2x_release_nvram_lock(bp);
9008
9009         return rc;
9010 }
9011
9012 static int bnx2x_set_eeprom(struct net_device *dev,
9013                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9014 {
9015         struct bnx2x *bp = netdev_priv(dev);
9016         int rc;
9017
9018         if (!netif_running(dev))
9019                 return -EAGAIN;
9020
9021         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9022            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9023            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9024            eeprom->len, eeprom->len);
9025
9026         /* parameters already validated in ethtool_set_eeprom */
9027
9028         /* If the magic is 0x00504859 (ASCII "PHY"), upgrade the PHY FW */
9029         if (eeprom->magic == 0x00504859)
9030                 if (bp->port.pmf) {
9031
9032                         bnx2x_acquire_phy_lock(bp);
9033                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
9034                                              bp->link_params.ext_phy_config,
9035                                              (bp->state != BNX2X_STATE_CLOSED),
9036                                              eebuf, eeprom->len);
9037                         if ((bp->state == BNX2X_STATE_OPEN) ||
9038                             (bp->state == BNX2X_STATE_DISABLED)) {
9039                                 rc |= bnx2x_link_reset(&bp->link_params,
9040                                                        &bp->link_vars, 1);
9041                                 rc |= bnx2x_phy_init(&bp->link_params,
9042                                                      &bp->link_vars);
9043                         }
9044                         bnx2x_release_phy_lock(bp);
9045
9046                 } else /* Only the PMF can access the PHY */
9047                         return -EINVAL;
9048         else
9049                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9050
9051         return rc;
9052 }
9053
9054 static int bnx2x_get_coalesce(struct net_device *dev,
9055                               struct ethtool_coalesce *coal)
9056 {
9057         struct bnx2x *bp = netdev_priv(dev);
9058
9059         memset(coal, 0, sizeof(struct ethtool_coalesce));
9060
9061         coal->rx_coalesce_usecs = bp->rx_ticks;
9062         coal->tx_coalesce_usecs = bp->tx_ticks;
9063
9064         return 0;
9065 }
9066
9067 static int bnx2x_set_coalesce(struct net_device *dev,
9068                               struct ethtool_coalesce *coal)
9069 {
9070         struct bnx2x *bp = netdev_priv(dev);
9071
9072         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9073         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9074                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9075
9076         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9077         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9078                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9079
9080         if (netif_running(dev))
9081                 bnx2x_update_coalesce(bp);
9082
9083         return 0;
9084 }
9085
9086 static void bnx2x_get_ringparam(struct net_device *dev,
9087                                 struct ethtool_ringparam *ering)
9088 {
9089         struct bnx2x *bp = netdev_priv(dev);
9090
9091         ering->rx_max_pending = MAX_RX_AVAIL;
9092         ering->rx_mini_max_pending = 0;
9093         ering->rx_jumbo_max_pending = 0;
9094
9095         ering->rx_pending = bp->rx_ring_size;
9096         ering->rx_mini_pending = 0;
9097         ering->rx_jumbo_pending = 0;
9098
9099         ering->tx_max_pending = MAX_TX_AVAIL;
9100         ering->tx_pending = bp->tx_ring_size;
9101 }
9102
9103 static int bnx2x_set_ringparam(struct net_device *dev,
9104                                struct ethtool_ringparam *ering)
9105 {
9106         struct bnx2x *bp = netdev_priv(dev);
9107         int rc = 0;
9108
9109         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9110             (ering->tx_pending > MAX_TX_AVAIL) ||
9111             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9112                 return -EINVAL;
9113
9114         bp->rx_ring_size = ering->rx_pending;
9115         bp->tx_ring_size = ering->tx_pending;
9116
9117         if (netif_running(dev)) {
9118                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9119                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9120         }
9121
9122         return rc;
9123 }
9124
9125 static void bnx2x_get_pauseparam(struct net_device *dev,
9126                                  struct ethtool_pauseparam *epause)
9127 {
9128         struct bnx2x *bp = netdev_priv(dev);
9129
9130         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9131                            BNX2X_FLOW_CTRL_AUTO) &&
9132                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9133
9134         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9135                             BNX2X_FLOW_CTRL_RX);
9136         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9137                             BNX2X_FLOW_CTRL_TX);
9138
9139         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9140            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9141            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9142 }
9143
9144 static int bnx2x_set_pauseparam(struct net_device *dev,
9145                                 struct ethtool_pauseparam *epause)
9146 {
9147         struct bnx2x *bp = netdev_priv(dev);
9148
9149         if (IS_E1HMF(bp))
9150                 return 0;
9151
9152         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9153            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9154            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9155
9156         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9157
9158         if (epause->rx_pause)
9159                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9160
9161         if (epause->tx_pause)
9162                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9163
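             /* if neither Rx nor Tx pause was requested above, fall back
              * from AUTO to no flow control */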
9164         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9165                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9166
9167         if (epause->autoneg) {
9168                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9169                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9170                         return -EINVAL;
9171                 }
9172
9173                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9174                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9175         }
9176
9177         DP(NETIF_MSG_LINK,
9178            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9179
9180         if (netif_running(dev)) {
9181                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9182                 bnx2x_link_set(bp);
9183         }
9184
9185         return 0;
9186 }
9187
9188 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9189 {
9190         struct bnx2x *bp = netdev_priv(dev);
9191         int changed = 0;
9192         int rc = 0;
9193
9194         /* TPA requires Rx CSUM offloading */
9195         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9196                 if (!(dev->features & NETIF_F_LRO)) {
9197                         dev->features |= NETIF_F_LRO;
9198                         bp->flags |= TPA_ENABLE_FLAG;
9199                         changed = 1;
9200                 }
9201
9202         } else if (dev->features & NETIF_F_LRO) {
9203                 dev->features &= ~NETIF_F_LRO;
9204                 bp->flags &= ~TPA_ENABLE_FLAG;
9205                 changed = 1;
9206         }
9207
9208         if (changed && netif_running(dev)) {
9209                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9210                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9211         }
9212
9213         return rc;
9214 }
9215
9216 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9217 {
9218         struct bnx2x *bp = netdev_priv(dev);
9219
9220         return bp->rx_csum;
9221 }
9222
9223 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9224 {
9225         struct bnx2x *bp = netdev_priv(dev);
9226         int rc = 0;
9227
9228         bp->rx_csum = data;
9229
9230         /* Disable TPA when Rx CSUM is disabled; otherwise all
9231            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9232         if (!data) {
9233                 u32 flags = ethtool_op_get_flags(dev);
9234
9235                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9236         }
9237
9238         return rc;
9239 }
9240
9241 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9242 {
9243         if (data) {
9244                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9245                 dev->features |= NETIF_F_TSO6;
9246         } else {
9247                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9248                 dev->features &= ~NETIF_F_TSO6;
9249         }
9250
9251         return 0;
9252 }
9253
9254 static const struct {
9255         char string[ETH_GSTRING_LEN];
9256 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9257         { "register_test (offline)" },
9258         { "memory_test (offline)" },
9259         { "loopback_test (offline)" },
9260         { "nvram_test (online)" },
9261         { "interrupt_test (online)" },
9262         { "link_test (online)" },
9263         { "idle check (online)" }
9264 };
9265
9266 static int bnx2x_self_test_count(struct net_device *dev)
9267 {
9268         return BNX2X_NUM_TESTS;
9269 }
9270
9271 static int bnx2x_test_registers(struct bnx2x *bp)
9272 {
9273         int idx, i, rc = -ENODEV;
9274         u32 wr_val = 0;
9275         int port = BP_PORT(bp);
9276         static const struct {
9277                 u32  offset0;
9278                 u32  offset1;
9279                 u32  mask;
9280         } reg_tbl[] = {
9281 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9282                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9283                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9284                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9285                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9286                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9287                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9288                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9289                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9290                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9291 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9292                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9293                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9294                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9295                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9296                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9297                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9298                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9299                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9300                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9301 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9302                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9303                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9304                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9305                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9306                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9307                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9308                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9309                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9310                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9311 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9312                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9313                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9314                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9315                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9316                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9317                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9318                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9319
9320                 { 0xffffffff, 0, 0x00000000 }
9321         };
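             /* reg_tbl: offset0 is the port 0 address, offset1 the per-port
              * stride added for port 1, and mask selects the bits that are
              * compared after the write; 0xffffffff in offset0 terminates
              * the table.
              */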
9322
9323         if (!netif_running(bp->dev))
9324                 return rc;
9325
9326         /* Run the test twice:
9327            first writing 0x00000000, then writing 0xffffffff */
9328         for (idx = 0; idx < 2; idx++) {
9329
9330                 switch (idx) {
9331                 case 0:
9332                         wr_val = 0;
9333                         break;
9334                 case 1:
9335                         wr_val = 0xffffffff;
9336                         break;
9337                 }
9338
9339                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9340                         u32 offset, mask, save_val, val;
9341
9342                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9343                         mask = reg_tbl[i].mask;
9344
9345                         save_val = REG_RD(bp, offset);
9346
9347                         REG_WR(bp, offset, wr_val);
9348                         val = REG_RD(bp, offset);
9349
9350                         /* Restore the original register's value */
9351                         REG_WR(bp, offset, save_val);
9352
9353                         /* verify that the value is as expected */
9354                         if ((val & mask) != (wr_val & mask))
9355                                 goto test_reg_exit;
9356                 }
9357         }
9358
9359         rc = 0;
9360
9361 test_reg_exit:
9362         return rc;
9363 }
9364
9365 static int bnx2x_test_memory(struct bnx2x *bp)
9366 {
9367         int i, j, rc = -ENODEV;
9368         u32 val;
9369         static const struct {
9370                 u32 offset;
9371                 int size;
9372         } mem_tbl[] = {
9373                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9374                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9375                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9376                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9377                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9378                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9379                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9380
9381                 { 0xffffffff, 0 }
9382         };
9383         static const struct {
9384                 char *name;
9385                 u32 offset;
9386                 u32 e1_mask;
9387                 u32 e1h_mask;
9388         } prty_tbl[] = {
9389                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9390                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9391                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9392                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9393                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9394                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9395
9396                 { NULL, 0xffffffff, 0, 0 }
9397         };
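             /* prty_tbl: e1_mask/e1h_mask list the parity status bits that
              * are ignored per chip; any set bit outside the mask fails the
              * memory test.
              */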
9398
9399         if (!netif_running(bp->dev))
9400                 return rc;
9401
9402         /* Go through all the memories */
9403         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9404                 for (j = 0; j < mem_tbl[i].size; j++)
9405                         REG_RD(bp, mem_tbl[i].offset + j*4);
9406
9407         /* Check the parity status */
9408         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9409                 val = REG_RD(bp, prty_tbl[i].offset);
9410                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9411                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9412                         DP(NETIF_MSG_HW,
9413                            "%s is 0x%x\n", prty_tbl[i].name, val);
9414                         goto test_mem_exit;
9415                 }
9416         }
9417
9418         rc = 0;
9419
9420 test_mem_exit:
9421         return rc;
9422 }
9423
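     /* When link_up is set, poll bnx2x_link_test() every 10 ms for up to
      * 1000 tries (~10 seconds) before giving up.
      */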
9424 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9425 {
9426         int cnt = 1000;
9427
9428         if (link_up)
9429                 while (bnx2x_link_test(bp) && cnt--)
9430                         msleep(10);
9431 }
9432
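/* Run a single-packet loopback test on queue 0: for MAC loopback the
 * BMAC is put into loopback here, while PHY loopback must already be
 * configured (LOOPBACK_XGXS_10).  One self-addressed frame carrying
 * an incrementing byte pattern is sent, and the test passes only if
 * exactly one Tx and one Rx completion arrive and the received
 * payload matches the pattern.
 */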
9433 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9434 {
9435         unsigned int pkt_size, num_pkts, i;
9436         struct sk_buff *skb;
9437         unsigned char *packet;
9438         struct bnx2x_fastpath *fp = &bp->fp[0];
9439         u16 tx_start_idx, tx_idx;
9440         u16 rx_start_idx, rx_idx;
9441         u16 pkt_prod;
9442         struct sw_tx_bd *tx_buf;
9443         struct eth_tx_bd *tx_bd;
9444         dma_addr_t mapping;
9445         union eth_rx_cqe *cqe;
9446         u8 cqe_fp_flags;
9447         struct sw_rx_bd *rx_buf;
9448         u16 len;
9449         int rc = -ENODEV;
9450
9451         /* check the loopback mode */
9452         switch (loopback_mode) {
9453         case BNX2X_PHY_LOOPBACK:
9454                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9455                         return -EINVAL;
9456                 break;
9457         case BNX2X_MAC_LOOPBACK:
9458                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9459                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9460                 break;
9461         default:
9462                 return -EINVAL;
9463         }
9464
9465         /* prepare the loopback packet */
9466         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9467                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9468         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9469         if (!skb) {
9470                 rc = -ENOMEM;
9471                 goto test_loopback_exit;
9472         }
9473         packet = skb_put(skb, pkt_size);
9474         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9475         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9476         for (i = ETH_HLEN; i < pkt_size; i++)
9477                 packet[i] = (unsigned char) (i & 0xff);
9478
9479         /* send the loopback packet */
9480         num_pkts = 0;
9481         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9482         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9483
9484         pkt_prod = fp->tx_pkt_prod++;
9485         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9486         tx_buf->first_bd = fp->tx_bd_prod;
9487         tx_buf->skb = skb;
9488
9489         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9490         mapping = pci_map_single(bp->pdev, skb->data,
9491                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9492         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9493         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9494         tx_bd->nbd = cpu_to_le16(1);
9495         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9496         tx_bd->vlan = cpu_to_le16(pkt_prod);
9497         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9498                                        ETH_TX_BD_FLAGS_END_BD);
9499         tx_bd->general_data = ((UNICAST_ADDRESS <<
9500                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9501
9502         wmb();
9503
9504         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9505         mb(); /* FW restriction: must not reorder writing nbd and packets */
9506         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9507         DOORBELL(bp, fp->index, 0);
9508
9509         mmiowb();
9510
9511         num_pkts++;
9512         fp->tx_bd_prod++;
9513         bp->dev->trans_start = jiffies;
9514
9515         udelay(100);
9516
9517         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9518         if (tx_idx != tx_start_idx + num_pkts)
9519                 goto test_loopback_exit;
9520
9521         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9522         if (rx_idx != rx_start_idx + num_pkts)
9523                 goto test_loopback_exit;
9524
9525         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9526         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9527         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9528                 goto test_loopback_rx_exit;
9529
9530         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9531         if (len != pkt_size)
9532                 goto test_loopback_rx_exit;
9533
9534         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9535         skb = rx_buf->skb;
9536         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9537         for (i = ETH_HLEN; i < pkt_size; i++)
9538                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9539                         goto test_loopback_rx_exit;
9540
9541         rc = 0;
9542
9543 test_loopback_rx_exit:
9544
9545         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9546         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9547         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9548         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9549
9550         /* Update producers */
9551         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9552                              fp->rx_sge_prod);
9553
9554 test_loopback_exit:
9555         bp->link_params.loopback_mode = LOOPBACK_NONE;
9556
9557         return rc;
9558 }
9559
9560 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9561 {
9562         int rc = 0, res;
9563
9564         if (!netif_running(bp->dev))
9565                 return BNX2X_LOOPBACK_FAILED;
9566
9567         bnx2x_netif_stop(bp, 1);
9568         bnx2x_acquire_phy_lock(bp);
9569
9570         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9571         if (res) {
9572                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9573                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9574         }
9575
9576         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9577         if (res) {
9578                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9579                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9580         }
9581
9582         bnx2x_release_phy_lock(bp);
9583         bnx2x_netif_start(bp);
9584
9585         return rc;
9586 }
9587
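/* CRC32 residual: every NVRAM partition in nvram_tbl stores its own
 * little-endian CRC32 in its last four bytes, so ether_crc_le() run
 * over the whole partition (data plus stored CRC) must yield this
 * constant for an intact image.
 */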
9588 #define CRC32_RESIDUAL                  0xdebb20e3
9589
9590 static int bnx2x_test_nvram(struct bnx2x *bp)
9591 {
9592         static const struct {
9593                 int offset;
9594                 int size;
9595         } nvram_tbl[] = {
9596                 {     0,  0x14 }, /* bootstrap */
9597                 {  0x14,  0xec }, /* dir */
9598                 { 0x100, 0x350 }, /* manuf_info */
9599                 { 0x450,  0xf0 }, /* feature_info */
9600                 { 0x640,  0x64 }, /* upgrade_key_info */
9601                 { 0x6a4,  0x64 },
9602                 { 0x708,  0x70 }, /* manuf_key_info */
9603                 { 0x778,  0x70 },
9604                 {     0,     0 }
9605         };
9606         __be32 buf[0x350 / 4];
9607         u8 *data = (u8 *)buf;
9608         int i, rc;
9609         u32 magic, csum;
9610
9611         rc = bnx2x_nvram_read(bp, 0, data, 4);
9612         if (rc) {
9613                 DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
9614                 goto test_nvram_exit;
9615         }
9616
9617         magic = be32_to_cpu(buf[0]);
9618         if (magic != 0x669955aa) {
9619                 DP(NETIF_MSG_PROBE, "bad magic value (0x%08x)\n", magic);
9620                 rc = -ENODEV;
9621                 goto test_nvram_exit;
9622         }
9623
9624         for (i = 0; nvram_tbl[i].size; i++) {
9625
9626                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9627                                       nvram_tbl[i].size);
9628                 if (rc) {
9629                         DP(NETIF_MSG_PROBE,
9630                            "nvram_tbl[%d] data read failed (rc %d)\n", i, rc);
9631                         goto test_nvram_exit;
9632                 }
9633
9634                 csum = ether_crc_le(nvram_tbl[i].size, data);
9635                 if (csum != CRC32_RESIDUAL) {
9636                         DP(NETIF_MSG_PROBE,
9637                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9638                         rc = -ENODEV;
9639                         goto test_nvram_exit;
9640                 }
9641         }
9642
9643 test_nvram_exit:
9644         return rc;
9645 }
9646
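/* Interrupt test: post an empty (length 0) SET_MAC ramrod on the
 * slowpath and wait up to 10 x 10 ms for its completion interrupt to
 * clear set_mac_pending; a timeout means slowpath interrupts are not
 * being delivered.
 */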
9647 static int bnx2x_test_intr(struct bnx2x *bp)
9648 {
9649         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9650         int i, rc;
9651
9652         if (!netif_running(bp->dev))
9653                 return -ENODEV;
9654
9655         config->hdr.length = 0;
9656         if (CHIP_IS_E1(bp))
9657                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9658         else
9659                 config->hdr.offset = BP_FUNC(bp);
9660         config->hdr.client_id = bp->fp->cl_id;
9661         config->hdr.reserved1 = 0;
9662
9663         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9664                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9665                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9666         if (rc == 0) {
9667                 bp->set_mac_pending++;
9668                 for (i = 0; i < 10; i++) {
9669                         if (!bp->set_mac_pending)
9670                                 break;
9671                         msleep_interruptible(10);
9672                 }
9673                 if (i == 10)
9674                         rc = -ENODEV;
9675         }
9676
9677         return rc;
9678 }
9679
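/* ethtool self-test; results land in buf[] as:
 *   buf[0] - register test   buf[1] - memory test
 *   buf[2] - loopback test   buf[3] - nvram test
 *   buf[4] - interrupt test  buf[5] - link test (PMF only)
 * The offline tests (0-2) require reloading the NIC with LOAD_DIAG.
 */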
9680 static void bnx2x_self_test(struct net_device *dev,
9681                             struct ethtool_test *etest, u64 *buf)
9682 {
9683         struct bnx2x *bp = netdev_priv(dev);
9684
9685         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9686
9687         if (!netif_running(dev))
9688                 return;
9689
9690         /* offline tests are not supported in MF mode */
9691         if (IS_E1HMF(bp))
9692                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9693
9694         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9695                 u8 link_up;
9696
9697                 link_up = bp->link_vars.link_up;
9698                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9699                 bnx2x_nic_load(bp, LOAD_DIAG);
9700                 /* wait until link state is restored */
9701                 bnx2x_wait_for_link(bp, link_up);
9702
9703                 if (bnx2x_test_registers(bp) != 0) {
9704                         buf[0] = 1;
9705                         etest->flags |= ETH_TEST_FL_FAILED;
9706                 }
9707                 if (bnx2x_test_memory(bp) != 0) {
9708                         buf[1] = 1;
9709                         etest->flags |= ETH_TEST_FL_FAILED;
9710                 }
9711                 buf[2] = bnx2x_test_loopback(bp, link_up);
9712                 if (buf[2] != 0)
9713                         etest->flags |= ETH_TEST_FL_FAILED;
9714
9715                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9716                 bnx2x_nic_load(bp, LOAD_NORMAL);
9717                 /* wait until link state is restored */
9718                 bnx2x_wait_for_link(bp, link_up);
9719         }
9720         if (bnx2x_test_nvram(bp) != 0) {
9721                 buf[3] = 1;
9722                 etest->flags |= ETH_TEST_FL_FAILED;
9723         }
9724         if (bnx2x_test_intr(bp) != 0) {
9725                 buf[4] = 1;
9726                 etest->flags |= ETH_TEST_FL_FAILED;
9727         }
9728         if (bp->port.pmf)
9729                 if (bnx2x_link_test(bp) != 0) {
9730                         buf[5] = 1;
9731                         etest->flags |= ETH_TEST_FL_FAILED;
9732                 }
9733
9734 #ifdef BNX2X_EXTRA_DEBUG
9735         bnx2x_panic_dump(bp);
9736 #endif
9737 }
9738
9739 static const struct {
9740         long offset;
9741         int size;
9742         u8 string[ETH_GSTRING_LEN];
9743 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9744 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9745         { Q_STATS_OFFSET32(error_bytes_received_hi),
9746                                                 8, "[%d]: rx_error_bytes" },
9747         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9748                                                 8, "[%d]: rx_ucast_packets" },
9749         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9750                                                 8, "[%d]: rx_mcast_packets" },
9751         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9752                                                 8, "[%d]: rx_bcast_packets" },
9753         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9754         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9755                                          4, "[%d]: rx_phy_ip_err_discards"},
9756         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9757                                          4, "[%d]: rx_skb_alloc_discard" },
9758         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9759
9760 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9761         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9762                                                         8, "[%d]: tx_packets" }
9763 };
9764
9765 static const struct {
9766         long offset;
9767         int size;
9768         u32 flags;
9769 #define STATS_FLAGS_PORT                1
9770 #define STATS_FLAGS_FUNC                2
9771 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9772         u8 string[ETH_GSTRING_LEN];
9773 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9774 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9775                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9776         { STATS_OFFSET32(error_bytes_received_hi),
9777                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9778         { STATS_OFFSET32(total_unicast_packets_received_hi),
9779                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9780         { STATS_OFFSET32(total_multicast_packets_received_hi),
9781                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9782         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9783                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9784         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9785                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9786         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9787                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9788         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9789                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9790         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9791                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9792 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9793                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9794         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9795                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9796         { STATS_OFFSET32(no_buff_discard_hi),
9797                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9798         { STATS_OFFSET32(mac_filter_discard),
9799                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9800         { STATS_OFFSET32(xxoverflow_discard),
9801                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9802         { STATS_OFFSET32(brb_drop_hi),
9803                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9804         { STATS_OFFSET32(brb_truncate_hi),
9805                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9806         { STATS_OFFSET32(pause_frames_received_hi),
9807                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9808         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9809                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9810         { STATS_OFFSET32(nig_timer_max),
9811                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9812 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9813                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9814         { STATS_OFFSET32(rx_skb_alloc_failed),
9815                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9816         { STATS_OFFSET32(hw_csum_err),
9817                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9818
9819         { STATS_OFFSET32(total_bytes_transmitted_hi),
9820                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9821         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9822                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9823         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9824                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9825         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9826                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9827         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9828                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9829         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9830                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9831         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9832                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9833 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9834                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9835         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9836                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9837         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9838                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9839         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9840                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9841         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9842                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9843         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9844                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9845         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9846                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9847         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9848                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9849         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9850                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9851         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9852                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9853 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9854                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9855         { STATS_OFFSET32(pause_frames_sent_hi),
9856                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9857 };
9858
9859 #define IS_PORT_STAT(i) \
9860         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9861 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9862 #define IS_E1HMF_MODE_STAT(bp) \
9863                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9864
9865 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9866 {
9867         struct bnx2x *bp = netdev_priv(dev);
9868         int i, j, k;
9869
9870         switch (stringset) {
9871         case ETH_SS_STATS:
9872                 if (is_multi(bp)) {
9873                         k = 0;
9874                         for_each_queue(bp, i) {
9875                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9876                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9877                                                 bnx2x_q_stats_arr[j].string, i);
9878                                 k += BNX2X_NUM_Q_STATS;
9879                         }
9880                         if (IS_E1HMF_MODE_STAT(bp))
9881                                 break;
9882                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9883                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9884                                        bnx2x_stats_arr[j].string);
9885                 } else {
9886                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9887                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9888                                         continue;
9889                                 strcpy(buf + j*ETH_GSTRING_LEN,
9890                                        bnx2x_stats_arr[i].string);
9891                                 j++;
9892                         }
9893                 }
9894                 break;
9895
9896         case ETH_SS_TEST:
9897                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9898                 break;
9899         }
9900 }
9901
9902 static int bnx2x_get_stats_count(struct net_device *dev)
9903 {
9904         struct bnx2x *bp = netdev_priv(dev);
9905         int i, num_stats;
9906
9907         if (is_multi(bp)) {
9908                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9909                 if (!IS_E1HMF_MODE_STAT(bp))
9910                         num_stats += BNX2X_NUM_STATS;
9911         } else {
9912                 if (IS_E1HMF_MODE_STAT(bp)) {
9913                         num_stats = 0;
9914                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9915                                 if (IS_FUNC_STAT(i))
9916                                         num_stats++;
9917                 } else
9918                         num_stats = BNX2X_NUM_STATS;
9919         }
9920
9921         return num_stats;
9922 }
9923
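/* Counters with size 8 are kept as two consecutive u32 words, high
 * word first, and are reassembled with HILO_U64(); a minimal sketch
 * of the combine, assuming the usual macro definition:
 *
 *      #define HILO_U64(hi, lo)        ((((u64)(hi)) << 32) + (lo))
 *
 * size 4 entries are plain u32 counters, and size 0 entries are
 * placeholders that always report 0.
 */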
9924 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9925                                     struct ethtool_stats *stats, u64 *buf)
9926 {
9927         struct bnx2x *bp = netdev_priv(dev);
9928         u32 *hw_stats, *offset;
9929         int i, j, k;
9930
9931         if (is_multi(bp)) {
9932                 k = 0;
9933                 for_each_queue(bp, i) {
9934                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9935                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9936                                 if (bnx2x_q_stats_arr[j].size == 0) {
9937                                         /* skip this counter */
9938                                         buf[k + j] = 0;
9939                                         continue;
9940                                 }
9941                                 offset = (hw_stats +
9942                                           bnx2x_q_stats_arr[j].offset);
9943                                 if (bnx2x_q_stats_arr[j].size == 4) {
9944                                         /* 4-byte counter */
9945                                         buf[k + j] = (u64) *offset;
9946                                         continue;
9947                                 }
9948                                 /* 8-byte counter */
9949                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9950                         }
9951                         k += BNX2X_NUM_Q_STATS;
9952                 }
9953                 if (IS_E1HMF_MODE_STAT(bp))
9954                         return;
9955                 hw_stats = (u32 *)&bp->eth_stats;
9956                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9957                         if (bnx2x_stats_arr[j].size == 0) {
9958                                 /* skip this counter */
9959                                 buf[k + j] = 0;
9960                                 continue;
9961                         }
9962                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9963                         if (bnx2x_stats_arr[j].size == 4) {
9964                                 /* 4-byte counter */
9965                                 buf[k + j] = (u64) *offset;
9966                                 continue;
9967                         }
9968                         /* 8-byte counter */
9969                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9970                 }
9971         } else {
9972                 hw_stats = (u32 *)&bp->eth_stats;
9973                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9974                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9975                                 continue;
9976                         if (bnx2x_stats_arr[i].size == 0) {
9977                                 /* skip this counter */
9978                                 buf[j] = 0;
9979                                 j++;
9980                                 continue;
9981                         }
9982                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9983                         if (bnx2x_stats_arr[i].size == 4) {
9984                                 /* 4-byte counter */
9985                                 buf[j] = (u64) *offset;
9986                                 j++;
9987                                 continue;
9988                         }
9989                         /* 8-byte counter */
9990                         buf[j] = HILO_U64(*offset, *(offset + 1));
9991                         j++;
9992                 }
9993         }
9994 }
9995
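/* ethtool PHYS_ID (LED blink): data is the blink time in seconds
 * (0 means the default of 2); each loop iteration toggles the LED
 * and sleeps 500 ms, so data * 2 iterations give data seconds.
 */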
9996 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9997 {
9998         struct bnx2x *bp = netdev_priv(dev);
9999         int port = BP_PORT(bp);
10000         int i;
10001
10002         if (!netif_running(dev))
10003                 return 0;
10004
10005         if (!bp->port.pmf)
10006                 return 0;
10007
10008         if (data == 0)
10009                 data = 2;
10010
10011         for (i = 0; i < (data * 2); i++) {
10012                 if ((i % 2) == 0)
10013                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10014                                       bp->link_params.hw_led_mode,
10015                                       bp->link_params.chip_id);
10016                 else
10017                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10018                                       bp->link_params.hw_led_mode,
10019                                       bp->link_params.chip_id);
10020
10021                 msleep_interruptible(500);
10022                 if (signal_pending(current))
10023                         break;
10024         }
10025
10026         if (bp->link_vars.link_up)
10027                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10028                               bp->link_vars.line_speed,
10029                               bp->link_params.hw_led_mode,
10030                               bp->link_params.chip_id);
10031
10032         return 0;
10033 }
10034
10035 static struct ethtool_ops bnx2x_ethtool_ops = {
10036         .get_settings           = bnx2x_get_settings,
10037         .set_settings           = bnx2x_set_settings,
10038         .get_drvinfo            = bnx2x_get_drvinfo,
10039         .get_regs_len           = bnx2x_get_regs_len,
10040         .get_regs               = bnx2x_get_regs,
10041         .get_wol                = bnx2x_get_wol,
10042         .set_wol                = bnx2x_set_wol,
10043         .get_msglevel           = bnx2x_get_msglevel,
10044         .set_msglevel           = bnx2x_set_msglevel,
10045         .nway_reset             = bnx2x_nway_reset,
10046         .get_link               = bnx2x_get_link,
10047         .get_eeprom_len         = bnx2x_get_eeprom_len,
10048         .get_eeprom             = bnx2x_get_eeprom,
10049         .set_eeprom             = bnx2x_set_eeprom,
10050         .get_coalesce           = bnx2x_get_coalesce,
10051         .set_coalesce           = bnx2x_set_coalesce,
10052         .get_ringparam          = bnx2x_get_ringparam,
10053         .set_ringparam          = bnx2x_set_ringparam,
10054         .get_pauseparam         = bnx2x_get_pauseparam,
10055         .set_pauseparam         = bnx2x_set_pauseparam,
10056         .get_rx_csum            = bnx2x_get_rx_csum,
10057         .set_rx_csum            = bnx2x_set_rx_csum,
10058         .get_tx_csum            = ethtool_op_get_tx_csum,
10059         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10060         .set_flags              = bnx2x_set_flags,
10061         .get_flags              = ethtool_op_get_flags,
10062         .get_sg                 = ethtool_op_get_sg,
10063         .set_sg                 = ethtool_op_set_sg,
10064         .get_tso                = ethtool_op_get_tso,
10065         .set_tso                = bnx2x_set_tso,
10066         .self_test_count        = bnx2x_self_test_count,
10067         .self_test              = bnx2x_self_test,
10068         .get_strings            = bnx2x_get_strings,
10069         .phys_id                = bnx2x_phys_id,
10070         .get_stats_count        = bnx2x_get_stats_count,
10071         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10072 };
10073
10074 /* end of ethtool_ops */
10075
10076 /****************************************************************************
10077 * General service functions
10078 ****************************************************************************/
10079
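/* Drive the power state through the PCI PM control/status register:
 * bits [1:0] of PMCSR select the state (00 = D0, 11 = D3hot),
 * PME_STATUS is write-one-to-clear, and PME_ENABLE arms PME
 * generation for wake-on-LAN.
 */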
10080 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10081 {
10082         u16 pmcsr;
10083
10084         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10085
10086         switch (state) {
10087         case PCI_D0:
10088                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10089                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10090                                        PCI_PM_CTRL_PME_STATUS));
10091
10092                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10093                         /* delay required during transition out of D3hot */
10094                         msleep(20);
10095                 break;
10096
10097         case PCI_D3hot:
10098                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10099                 pmcsr |= 3;
10100
10101                 if (bp->wol)
10102                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10103
10104                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10105                                       pmcsr);
10106
10107                 /* No more memory access after this point until
10108                  * device is brought back to D0.
10109                  */
10110                 break;
10111
10112         default:
10113                 return -EINVAL;
10114         }
10115         return 0;
10116 }
10117
10118 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10119 {
10120         u16 rx_cons_sb;
10121
10122         /* Tell compiler that status block fields can change */
10123         barrier();
10124         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
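        /* The last entry of each RCQ page is a "next page" BD rather
         * than a real completion, so the index must skip over it.
         */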
10125         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10126                 rx_cons_sb++;
10127         return (fp->rx_comp_cons != rx_cons_sb);
10128 }
10129
10130 /*
10131  * net_device service functions
10132  */
10133
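/* NAPI poll: reap Tx completions (unbudgeted), then Rx completions up
 * to the budget; the fastpath interrupt is re-enabled through the IGU
 * only once no work remains after re-reading the status block.
 */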
10134 static int bnx2x_poll(struct napi_struct *napi, int budget)
10135 {
10136         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10137                                                  napi);
10138         struct bnx2x *bp = fp->bp;
10139         int work_done = 0;
10140
10141 #ifdef BNX2X_STOP_ON_ERROR
10142         if (unlikely(bp->panic))
10143                 goto poll_panic;
10144 #endif
10145
10146         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10147         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10148         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10149
10150         bnx2x_update_fpsb_idx(fp);
10151
10152         if (bnx2x_has_tx_work(fp))
10153                 bnx2x_tx_int(fp);
10154
10155         if (bnx2x_has_rx_work(fp)) {
10156                 work_done = bnx2x_rx_int(fp, budget);
10157
10158                 /* must not complete if we consumed full budget */
10159                 if (work_done >= budget)
10160                         goto poll_again;
10161         }
10162
10163         /* BNX2X_HAS_WORK() reads the status block, so we must make sure
10164          * the status block indices have actually been read
10165          * (bnx2x_update_fpsb_idx) before this check (BNX2X_HAS_WORK);
10166          * otherwise we could write the "newer" status block value to the
10167          * IGU while its work is unhandled: without the rmb, the read in
10168          * bnx2x_update_fpsb_idx may be postponed to right before
10169          * bnx2x_ack_sb (e.g. if a DMA lands right after BNX2X_HAS_WORK),
10170          * and no further interrupt would arrive until the next status
10171          * block update, even though there is still unhandled work.
10172          */
10173         rmb();
10174
10175         if (!BNX2X_HAS_WORK(fp)) {
10176 #ifdef BNX2X_STOP_ON_ERROR
10177 poll_panic:
10178 #endif
10179                 napi_complete(napi);
10180
10181                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10182                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10183                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10184                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10185         }
10186
10187 poll_again:
10188         return work_done;
10189 }
10190
10191
10192 /* We split the first BD into a headers BD and a data BD to ease
10193  * the pain of our fellow microcode engineers; one DMA mapping is
10194  * shared by both BDs.
10195  * So far this split has only been observed to happen
10196  * in Other Operating Systems(TM).
10197  */
10198 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10199                                    struct bnx2x_fastpath *fp,
10200                                    struct eth_tx_bd **tx_bd, u16 hlen,
10201                                    u16 bd_prod, int nbd)
10202 {
10203         struct eth_tx_bd *h_tx_bd = *tx_bd;
10204         struct eth_tx_bd *d_tx_bd;
10205         dma_addr_t mapping;
10206         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10207
10208         /* first fix first BD */
10209         h_tx_bd->nbd = cpu_to_le16(nbd);
10210         h_tx_bd->nbytes = cpu_to_le16(hlen);
10211
10212         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10213            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10214            h_tx_bd->addr_lo, h_tx_bd->nbd);
10215
10216         /* now get a new data BD
10217          * (after the pbd) and fill it */
10218         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10219         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10220
10221         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10222                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10223
10224         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10225         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10226         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10227         d_tx_bd->vlan = 0;
10228         /* this marks the BD as one that has no individual mapping;
10229          * the FW ignores this flag in a BD not marked start
10230          */
10231         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10232         DP(NETIF_MSG_TX_QUEUED,
10233            "TSO split data size is %d (%x:%x)\n",
10234            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10235
10236         /* update tx_bd for marking the last BD flag */
10237         *tx_bd = d_tx_bd;
10238
10239         return bd_prod;
10240 }
10241
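/* Adjust the checksum computed by the stack so that it covers exactly
 * the transport header and payload: a positive fix means the stack's
 * sum started fix bytes before the transport header, so that span is
 * subtracted out; a negative fix means it started after the header,
 * so the missing bytes are added back.  The result is byte-swapped
 * into the format the FW expects.
 */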
10242 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10243 {
10244         if (fix > 0)
10245                 csum = (u16) ~csum_fold(csum_sub(csum,
10246                                 csum_partial(t_header - fix, fix, 0)));
10247
10248         else if (fix < 0)
10249                 csum = (u16) ~csum_fold(csum_add(csum,
10250                                 csum_partial(t_header, -fix, 0)));
10251
10252         return swab16(csum);
10253 }
10254
10255 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10256 {
10257         u32 rc;
10258
10259         if (skb->ip_summed != CHECKSUM_PARTIAL)
10260                 rc = XMIT_PLAIN;
10261
10262         else {
10263                 if (skb->protocol == htons(ETH_P_IPV6)) {
10264                         rc = XMIT_CSUM_V6;
10265                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10266                                 rc |= XMIT_CSUM_TCP;
10267
10268                 } else {
10269                         rc = XMIT_CSUM_V4;
10270                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10271                                 rc |= XMIT_CSUM_TCP;
10272                 }
10273         }
10274
10275         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10276                 rc |= XMIT_GSO_V4;
10277
10278         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10279                 rc |= XMIT_GSO_V6;
10280
10281         return rc;
10282 }
10283
10284 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10285 /* check whether the packet requires linearization (it is too
10286    fragmented); no need to check fragmentation if the page size is
10287    above 8K, as the FW restrictions cannot be violated then */
10288 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10289                              u32 xmit_type)
10290 {
10291         int to_copy = 0;
10292         int hlen = 0;
10293         int first_bd_sz = 0;
10294
10295         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10296         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10297
10298                 if (xmit_type & XMIT_GSO) {
10299                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10300                         /* Check if LSO packet needs to be copied:
10301                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10302                         int wnd_size = MAX_FETCH_BD - 3;
10303                         /* Number of windows to check */
10304                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10305                         int wnd_idx = 0;
10306                         int frag_idx = 0;
10307                         u32 wnd_sum = 0;
10308
10309                         /* Headers length */
10310                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10311                                 tcp_hdrlen(skb);
10312
10313                         /* Amount of data (w/o headers) on linear part of SKB*/
10314                         first_bd_sz = skb_headlen(skb) - hlen;
10315
10316                         wnd_sum  = first_bd_sz;
10317
10318                         /* Calculate the first sum - it's special */
10319                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10320                                 wnd_sum +=
10321                                         skb_shinfo(skb)->frags[frag_idx].size;
10322
10323                         /* If there was data on linear skb data - check it */
10324                         if (first_bd_sz > 0) {
10325                                 if (unlikely(wnd_sum < lso_mss)) {
10326                                         to_copy = 1;
10327                                         goto exit_lbl;
10328                                 }
10329
10330                                 wnd_sum -= first_bd_sz;
10331                         }
10332
10333                         /* Others are easier: run through the frag list and
10334                            check all windows */
10335                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10336                                 wnd_sum +=
10337                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10338
10339                                 if (unlikely(wnd_sum < lso_mss)) {
10340                                         to_copy = 1;
10341                                         break;
10342                                 }
10343                                 wnd_sum -=
10344                                         skb_shinfo(skb)->frags[wnd_idx].size;
10345                         }
10346                 } else {
10347                         /* a non-LSO packet that is too fragmented
10348                            must always be linearized */
10349                         to_copy = 1;
10350                 }
10351         }
10352
10353 exit_lbl:
10354         if (unlikely(to_copy))
10355                 DP(NETIF_MSG_TX_QUEUED,
10356                    "Linearization IS REQUIRED for %s packet. "
10357                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10358                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10359                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10360
10361         return to_copy;
10362 }
10363 #endif
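/* An illustrative walk of the window check above (the numbers are
 * assumed for illustration, not taken from the FW spec): with
 * wnd_size = 10 and lso_mss = 1460, the first window covers the
 * linear payload plus frags 0..8 and each later window covers 10
 * consecutive frags; if any window sums to fewer than 1460 bytes,
 * one MSS-sized segment could need more BDs than the FW can fetch,
 * so the skb is linearized instead.
 */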
10364
10365 /* called with netif_tx_lock
10366  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10367  * netif_wake_queue()
10368  */
10369 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10370 {
10371         struct bnx2x *bp = netdev_priv(dev);
10372         struct bnx2x_fastpath *fp;
10373         struct netdev_queue *txq;
10374         struct sw_tx_bd *tx_buf;
10375         struct eth_tx_bd *tx_bd;
10376         struct eth_tx_parse_bd *pbd = NULL;
10377         u16 pkt_prod, bd_prod;
10378         int nbd, fp_index;
10379         dma_addr_t mapping;
10380         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10381         int vlan_off = (bp->e1hov ? 4 : 0);
10382         int i;
10383         u8 hlen = 0;
10384
10385 #ifdef BNX2X_STOP_ON_ERROR
10386         if (unlikely(bp->panic))
10387                 return NETDEV_TX_BUSY;
10388 #endif
10389
10390         fp_index = skb_get_queue_mapping(skb);
10391         txq = netdev_get_tx_queue(dev, fp_index);
10392
10393         fp = &bp->fp[fp_index];
10394
10395         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10396                 fp->eth_q_stats.driver_xoff++;
10397                 netif_tx_stop_queue(txq);
10398                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10399                 return NETDEV_TX_BUSY;
10400         }
10401
10402         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10403            "  gso type %x  xmit_type %x\n",
10404            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10405            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10406
10407 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10408         /* First, check if we need to linearize the skb (due to FW
10409            restrictions); no need to check fragmentation if the page size
10410            is above 8K, as the FW restrictions cannot be violated then */
10411         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10412                 /* Statistics of linearization */
10413                 bp->lin_cnt++;
10414                 if (skb_linearize(skb) != 0) {
10415                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10416                            "silently dropping this SKB\n");
10417                         dev_kfree_skb_any(skb);
10418                         return NETDEV_TX_OK;
10419                 }
10420         }
10421 #endif
10422
10423         /*
10424          * Please read carefully. First we use one BD which we mark as
10425          * start, then for TSO or xsum we have a parsing info BD,
10426          * and only then we have the rest of the TSO BDs.
10427          * (Don't forget to mark the last one as last,
10428          * and to unmap only AFTER you write to the BD ...)
10429          * And above all, all pbd sizes are in words - NOT DWORDS!
10430          */
10431
10432         pkt_prod = fp->tx_pkt_prod++;
10433         bd_prod = TX_BD(fp->tx_bd_prod);
10434
10435         /* get a tx_buf and first BD */
10436         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10437         tx_bd = &fp->tx_desc_ring[bd_prod];
10438
10439         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10440         tx_bd->general_data = (UNICAST_ADDRESS <<
10441                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10442         /* header nbd */
10443         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10444
10445         /* remember the first BD of the packet */
10446         tx_buf->first_bd = fp->tx_bd_prod;
10447         tx_buf->skb = skb;
10448
10449         DP(NETIF_MSG_TX_QUEUED,
10450            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10451            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10452
10453 #ifdef BCM_VLAN
10454         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10455             (bp->flags & HW_VLAN_TX_FLAG)) {
10456                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10457                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10458                 vlan_off += 4;
10459         } else
10460 #endif
10461                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10462
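        /* xmit_type is a bitmask; XMIT_PLAIN is 0 (no checksum offload
         * and no GSO), so any set flag means a parsing BD is needed. */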
10463         if (xmit_type) {
10464                 /* turn on parsing and get a BD */
10465                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10466                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10467
10468                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10469         }
10470
10471         if (xmit_type & XMIT_CSUM) {
10472                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10473
10474                 /* for now NS flag is not used in Linux */
10475                 pbd->global_data =
10476                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10477                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10478
10479                 pbd->ip_hlen = (skb_transport_header(skb) -
10480                                 skb_network_header(skb)) / 2;
10481
10482                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10483
10484                 pbd->total_hlen = cpu_to_le16(hlen);
10485                 hlen = hlen*2 - vlan_off;
10486
10487                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10488
10489                 if (xmit_type & XMIT_CSUM_V4)
10490                         tx_bd->bd_flags.as_bitfield |=
10491                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10492                 else
10493                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10494
10495                 if (xmit_type & XMIT_CSUM_TCP) {
10496                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10497
10498                 } else {
10499                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10500
10501                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10502                         pbd->cs_offset = fix / 2;
10503
10504                         DP(NETIF_MSG_TX_QUEUED,
10505                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10506                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10507                            SKB_CS(skb));
10508
10509                         /* HW bug: fixup the CSUM */
10510                         pbd->tcp_pseudo_csum =
10511                                 bnx2x_csum_fix(skb_transport_header(skb),
10512                                                SKB_CS(skb), fix);
10513
10514                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10515                            pbd->tcp_pseudo_csum);
10516                 }
10517         }
10518
10519         mapping = pci_map_single(bp->pdev, skb->data,
10520                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10521
10522         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10523         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10524         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10525         tx_bd->nbd = cpu_to_le16(nbd);
10526         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10527
10528         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10529            "  nbytes %d  flags %x  vlan %x\n",
10530            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10531            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10532            le16_to_cpu(tx_bd->vlan));
10533
10534         if (xmit_type & XMIT_GSO) {
10535
10536                 DP(NETIF_MSG_TX_QUEUED,
10537                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10538                    skb->len, hlen, skb_headlen(skb),
10539                    skb_shinfo(skb)->gso_size);
10540
10541                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10542
10543                 if (unlikely(skb_headlen(skb) > hlen))
10544                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10545                                                  bd_prod, ++nbd);
10546
10547                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10548                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10549                 pbd->tcp_flags = pbd_tcp_flags(skb);
10550
10551                 if (xmit_type & XMIT_GSO_V4) {
10552                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10553                         pbd->tcp_pseudo_csum =
10554                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10555                                                           ip_hdr(skb)->daddr,
10556                                                           0, IPPROTO_TCP, 0));
10557
10558                 } else
10559                         pbd->tcp_pseudo_csum =
10560                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10561                                                         &ipv6_hdr(skb)->daddr,
10562                                                         0, IPPROTO_TCP, 0));
10563
10564                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10565         }
10566
10567         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10568                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10569
10570                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10571                 tx_bd = &fp->tx_desc_ring[bd_prod];
10572
10573                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10574                                        frag->size, PCI_DMA_TODEVICE);
10575
10576                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10577                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10578                 tx_bd->nbytes = cpu_to_le16(frag->size);
10579                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10580                 tx_bd->bd_flags.as_bitfield = 0;
10581
10582                 DP(NETIF_MSG_TX_QUEUED,
10583                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10584                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10585                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10586         }
10587
10588         /* now at last mark the BD as the last BD */
10589         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10590
10591         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10592            tx_bd, tx_bd->bd_flags.as_bitfield);
10593
10594         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10595
10596         /* now send a tx doorbell, counting in nbd the next-page BD
10597          * if the packet's BD chain crosses or ends at a page boundary
10598          */
10599         if (TX_BD_POFF(bd_prod) < nbd)
10600                 nbd++;
10601
10602         if (pbd)
10603                 DP(NETIF_MSG_TX_QUEUED,
10604                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10605                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10606                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10607                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10608                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10609
10610         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10611
10612         /*
10613          * Make sure that the BD data is updated before updating the producer
10614          * since FW might read the BD right after the producer is updated.
10615          * This is only applicable for weak-ordered memory model archs such
10616          * as IA-64. The following barrier is also mandatory since the FW
10617          * assumes packets must have BDs.
10618          */
10619         wmb();
10620
10621         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10622         mb(); /* FW restriction: must not reorder writing nbd and packets */
10623         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10624         DOORBELL(bp, fp->index, 0);
10625
10626         mmiowb();
10627
10628         fp->tx_bd_prod += nbd;
10629
10630         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10631                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10632                    if we put Tx into XOFF state. */
10633                 smp_mb();
10634                 netif_tx_stop_queue(txq);
10635                 fp->eth_q_stats.driver_xoff++;
10636                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10637                         netif_tx_wake_queue(txq);
10638         }
10639         fp->tx_pkt++;
10640
10641         return NETDEV_TX_OK;
10642 }
10643
10644 /* called with rtnl_lock */
10645 static int bnx2x_open(struct net_device *dev)
10646 {
10647         struct bnx2x *bp = netdev_priv(dev);
10648
10649         netif_carrier_off(dev);
10650
10651         bnx2x_set_power_state(bp, PCI_D0);
10652
10653         return bnx2x_nic_load(bp, LOAD_OPEN);
10654 }
10655
10656 /* called with rtnl_lock */
10657 static int bnx2x_close(struct net_device *dev)
10658 {
10659         struct bnx2x *bp = netdev_priv(dev);
10660
10661         /* Unload the driver, release IRQs */
10662         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10663         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10664                 if (!CHIP_REV_IS_SLOW(bp))
10665                         bnx2x_set_power_state(bp, PCI_D3hot);
10666
10667         return 0;
10668 }
10669
10670 /* called with netif_tx_lock from dev_mcast.c */
10671 static void bnx2x_set_rx_mode(struct net_device *dev)
10672 {
10673         struct bnx2x *bp = netdev_priv(dev);
10674         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10675         int port = BP_PORT(bp);
10676
10677         if (bp->state != BNX2X_STATE_OPEN) {
10678                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10679                 return;
10680         }
10681
10682         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10683
10684         if (dev->flags & IFF_PROMISC)
10685                 rx_mode = BNX2X_RX_MODE_PROMISC;
10686
10687         else if ((dev->flags & IFF_ALLMULTI) ||
10688                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10689                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10690
10691         else { /* some multicasts */
10692                 if (CHIP_IS_E1(bp)) {
10693                         int i, old, offset;
10694                         struct dev_mc_list *mclist;
10695                         struct mac_configuration_cmd *config =
10696                                                 bnx2x_sp(bp, mcast_config);
10697
10698                         for (i = 0, mclist = dev->mc_list;
10699                              mclist && (i < dev->mc_count);
10700                              i++, mclist = mclist->next) {
10701
10702                                 config->config_table[i].
10703                                         cam_entry.msb_mac_addr =
10704                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10705                                 config->config_table[i].
10706                                         cam_entry.middle_mac_addr =
10707                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10708                                 config->config_table[i].
10709                                         cam_entry.lsb_mac_addr =
10710                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10711                                 config->config_table[i].cam_entry.flags =
10712                                                         cpu_to_le16(port);
10713                                 config->config_table[i].
10714                                         target_table_entry.flags = 0;
10715                                 config->config_table[i].
10716                                         target_table_entry.client_id = 0;
10717                                 config->config_table[i].
10718                                         target_table_entry.vlan_id = 0;
10719
10720                                 DP(NETIF_MSG_IFUP,
10721                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10722                                    config->config_table[i].
10723                                                 cam_entry.msb_mac_addr,
10724                                    config->config_table[i].
10725                                                 cam_entry.middle_mac_addr,
10726                                    config->config_table[i].
10727                                                 cam_entry.lsb_mac_addr);
10728                         }
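                              /* If the previous list was longer, invalidate
                               * the now-stale tail entries.
                               */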
10729                         old = config->hdr.length;
10730                         if (old > i) {
10731                                 for (; i < old; i++) {
10732                                         if (CAM_IS_INVALID(config->
10733                                                            config_table[i])) {
10734                                                 /* already invalidated */
10735                                                 break;
10736                                         }
10737                                         /* invalidate */
10738                                         CAM_INVALIDATE(config->
10739                                                        config_table[i]);
10740                                 }
10741                         }
10742
10743                         if (CHIP_REV_IS_SLOW(bp))
10744                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10745                         else
10746                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10747
10748                         config->hdr.length = i;
10749                         config->hdr.offset = offset;
10750                         config->hdr.client_id = bp->fp->cl_id;
10751                         config->hdr.reserved1 = 0;
10752
10753                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10754                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10755                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10756                                       0);
10757                 } else { /* E1H */
10758                         /* Accept one or more multicasts */
10759                         struct dev_mc_list *mclist;
10760                         u32 mc_filter[MC_HASH_SIZE];
10761                         u32 crc, bit, regidx;
10762                         int i;
10763
10764                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10765
10766                         for (i = 0, mclist = dev->mc_list;
10767                              mclist && (i < dev->mc_count);
10768                              i++, mclist = mclist->next) {
10769
10770                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10771                                    mclist->dmi_addr);
10772
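                              /* Hash the MAC with CRC32c and use the top CRC
                               * byte to pick one of 256 filter bits, e.g. a
                               * top byte of 0xa7 (167 = 5*32 + 7) sets bit 7
                               * of mc_filter[5].
                               */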
10773                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10774                                 bit = (crc >> 24) & 0xff;
10775                                 regidx = bit >> 5;
10776                                 bit &= 0x1f;
10777                                 mc_filter[regidx] |= (1 << bit);
10778                         }
10779
10780                         for (i = 0; i < MC_HASH_SIZE; i++)
10781                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10782                                        mc_filter[i]);
10783                 }
10784         }
10785
10786         bp->rx_mode = rx_mode;
10787         bnx2x_set_storm_rx_mode(bp);
10788 }
10789
10790 /* called with rtnl_lock */
10791 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10792 {
10793         struct sockaddr *addr = p;
10794         struct bnx2x *bp = netdev_priv(dev);
10795
10796         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10797                 return -EINVAL;
10798
10799         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10800         if (netif_running(dev)) {
10801                 if (CHIP_IS_E1(bp))
10802                         bnx2x_set_mac_addr_e1(bp, 1);
10803                 else
10804                         bnx2x_set_mac_addr_e1h(bp, 1);
10805         }
10806
10807         return 0;
10808 }
10809
10810 /* called with rtnl_lock */
10811 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10812 {
10813         struct mii_ioctl_data *data = if_mii(ifr);
10814         struct bnx2x *bp = netdev_priv(dev);
10815         int port = BP_PORT(bp);
10816         int err;
10817
10818         switch (cmd) {
10819         case SIOCGMIIPHY:
10820                 data->phy_id = bp->port.phy_addr;
10821
10822                 /* fallthrough */
10823
10824         case SIOCGMIIREG: {
10825                 u16 mii_regval;
10826
10827                 if (!netif_running(dev))
10828                         return -EAGAIN;
10829
10830                 mutex_lock(&bp->port.phy_mutex);
10831                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10832                                       DEFAULT_PHY_DEV_ADDR,
10833                                       (data->reg_num & 0x1f), &mii_regval);
10834                 data->val_out = mii_regval;
10835                 mutex_unlock(&bp->port.phy_mutex);
10836                 return err;
10837         }
10838
10839         case SIOCSMIIREG:
10840                 if (!capable(CAP_NET_ADMIN))
10841                         return -EPERM;
10842
10843                 if (!netif_running(dev))
10844                         return -EAGAIN;
10845
10846                 mutex_lock(&bp->port.phy_mutex);
10847                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10848                                        DEFAULT_PHY_DEV_ADDR,
10849                                        (data->reg_num & 0x1f), data->val_in);
10850                 mutex_unlock(&bp->port.phy_mutex);
10851                 return err;
10852
10853         default:
10854                 /* do nothing */
10855                 break;
10856         }
10857
10858         return -EOPNOTSUPP;
10859 }
10860
10861 /* called with rtnl_lock */
10862 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10863 {
10864         struct bnx2x *bp = netdev_priv(dev);
10865         int rc = 0;
10866
10867         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10868             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10869                 return -EINVAL;
10870
10871         /* This does not race with packet allocation
10872          * because the actual alloc size is
10873          * only updated as part of load
10874          */
10875         dev->mtu = new_mtu;
10876
10877         if (netif_running(dev)) {
10878                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10879                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10880         }
10881
10882         return rc;
10883 }
10884
10885 static void bnx2x_tx_timeout(struct net_device *dev)
10886 {
10887         struct bnx2x *bp = netdev_priv(dev);
10888
10889 #ifdef BNX2X_STOP_ON_ERROR
10890         if (!bp->panic)
10891                 bnx2x_panic();
10892 #endif
10893         /* This allows the netif to be shut down gracefully before resetting */
10894         schedule_work(&bp->reset_task);
10895 }
10896
10897 #ifdef BCM_VLAN
10898 /* called with rtnl_lock */
10899 static void bnx2x_vlan_rx_register(struct net_device *dev,
10900                                    struct vlan_group *vlgrp)
10901 {
10902         struct bnx2x *bp = netdev_priv(dev);
10903
10904         bp->vlgrp = vlgrp;
10905
10906         /* Set flags according to the required capabilities */
10907         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10908
10909         if (dev->features & NETIF_F_HW_VLAN_TX)
10910                 bp->flags |= HW_VLAN_TX_FLAG;
10911
10912         if (dev->features & NETIF_F_HW_VLAN_RX)
10913                 bp->flags |= HW_VLAN_RX_FLAG;
10914
10915         if (netif_running(dev))
10916                 bnx2x_set_client_config(bp);
10917 }
10918
10919 #endif
10920
10921 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
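      /* netpoll entry point: with the device IRQ masked, invoke the
       * interrupt handler directly so netconsole and friends can make
       * progress.
       */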
10922 static void poll_bnx2x(struct net_device *dev)
10923 {
10924         struct bnx2x *bp = netdev_priv(dev);
10925
10926         disable_irq(bp->pdev->irq);
10927         bnx2x_interrupt(bp->pdev->irq, dev);
10928         enable_irq(bp->pdev->irq);
10929 }
10930 #endif
10931
10932 static const struct net_device_ops bnx2x_netdev_ops = {
10933         .ndo_open               = bnx2x_open,
10934         .ndo_stop               = bnx2x_close,
10935         .ndo_start_xmit         = bnx2x_start_xmit,
10936         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10937         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10938         .ndo_validate_addr      = eth_validate_addr,
10939         .ndo_do_ioctl           = bnx2x_ioctl,
10940         .ndo_change_mtu         = bnx2x_change_mtu,
10941         .ndo_tx_timeout         = bnx2x_tx_timeout,
10942 #ifdef BCM_VLAN
10943         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10944 #endif
10945 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10946         .ndo_poll_controller    = poll_bnx2x,
10947 #endif
10948 };
10949
10950 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10951                                     struct net_device *dev)
10952 {
10953         struct bnx2x *bp;
10954         int rc;
10955
10956         SET_NETDEV_DEV(dev, &pdev->dev);
10957         bp = netdev_priv(dev);
10958
10959         bp->dev = dev;
10960         bp->pdev = pdev;
10961         bp->flags = 0;
10962         bp->func = PCI_FUNC(pdev->devfn);
10963
10964         rc = pci_enable_device(pdev);
10965         if (rc) {
10966                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10967                 goto err_out;
10968         }
10969
10970         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10971                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10972                        " aborting\n");
10973                 rc = -ENODEV;
10974                 goto err_out_disable;
10975         }
10976
10977         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10978                 printk(KERN_ERR PFX "Cannot find second PCI device"
10979                        " base address, aborting\n");
10980                 rc = -ENODEV;
10981                 goto err_out_disable;
10982         }
10983
10984         if (atomic_read(&pdev->enable_cnt) == 1) {
10985                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10986                 if (rc) {
10987                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10988                                " aborting\n");
10989                         goto err_out_disable;
10990                 }
10991
10992                 pci_set_master(pdev);
10993                 pci_save_state(pdev);
10994         }
10995
10996         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10997         if (bp->pm_cap == 0) {
10998                 printk(KERN_ERR PFX "Cannot find power management"
10999                        " capability, aborting\n");
11000                 rc = -EIO;
11001                 goto err_out_release;
11002         }
11003
11004         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11005         if (bp->pcie_cap == 0) {
11006                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11007                        " aborting\n");
11008                 rc = -EIO;
11009                 goto err_out_release;
11010         }
11011
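              /* Prefer 64-bit DMA (DAC) and fall back to a 32-bit mask;
               * if neither can be set, the device cannot DMA on this system.
               */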
11012         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11013                 bp->flags |= USING_DAC_FLAG;
11014                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11015                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11016                                " failed, aborting\n");
11017                         rc = -EIO;
11018                         goto err_out_release;
11019                 }
11020
11021         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11022                 printk(KERN_ERR PFX "System does not support DMA,"
11023                        " aborting\n");
11024                 rc = -EIO;
11025                 goto err_out_release;
11026         }
11027
11028         dev->mem_start = pci_resource_start(pdev, 0);
11029         dev->base_addr = dev->mem_start;
11030         dev->mem_end = pci_resource_end(pdev, 0);
11031
11032         dev->irq = pdev->irq;
11033
11034         bp->regview = pci_ioremap_bar(pdev, 0);
11035         if (!bp->regview) {
11036                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11037                 rc = -ENOMEM;
11038                 goto err_out_release;
11039         }
11040
11041         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11042                                         min_t(u64, BNX2X_DB_SIZE,
11043                                               pci_resource_len(pdev, 2)));
11044         if (!bp->doorbells) {
11045                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11046                 rc = -ENOMEM;
11047                 goto err_out_unmap;
11048         }
11049
11050         bnx2x_set_power_state(bp, PCI_D0);
11051
11052         /* clean indirect addresses */
11053         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11054                                PCICFG_VENDOR_ID_OFFSET);
11055         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11056         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11057         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11058         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11059
11060         dev->watchdog_timeo = TX_TIMEOUT;
11061
11062         dev->netdev_ops = &bnx2x_netdev_ops;
11063         dev->ethtool_ops = &bnx2x_ethtool_ops;
11064         dev->features |= NETIF_F_SG;
11065         dev->features |= NETIF_F_HW_CSUM;
11066         if (bp->flags & USING_DAC_FLAG)
11067                 dev->features |= NETIF_F_HIGHDMA;
11068 #ifdef BCM_VLAN
11069         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11070         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11071 #endif
11072         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11073         dev->features |= NETIF_F_TSO6;
11074
11075         return 0;
11076
11077 err_out_unmap:
11078         if (bp->regview) {
11079                 iounmap(bp->regview);
11080                 bp->regview = NULL;
11081         }
11082         if (bp->doorbells) {
11083                 iounmap(bp->doorbells);
11084                 bp->doorbells = NULL;
11085         }
11086
11087 err_out_release:
11088         if (atomic_read(&pdev->enable_cnt) == 1)
11089                 pci_release_regions(pdev);
11090
11091 err_out_disable:
11092         pci_disable_device(pdev);
11093         pci_set_drvdata(pdev, NULL);
11094
11095 err_out:
11096         return rc;
11097 }
11098
11099 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11100 {
11101         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11102
11103         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11104         return val;
11105 }
11106
11107 /* return value of 1=2.5GHz 2=5GHz */
11108 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11109 {
11110         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11111
11112         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11113         return val;
11114 }

11115 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11116 {
11117         struct bnx2x_fw_file_hdr *fw_hdr;
11118         struct bnx2x_fw_file_section *sections;
11119         u16 *ops_offsets;
11120         u32 offset, len, num_ops;
11121         int i;
11122         const struct firmware *firmware = bp->firmware;
11123         const u8 *fw_ver;
11124
11125         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11126                 return -EINVAL;
11127
11128         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11129         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11130
11131         /* Make sure none of the offsets and sizes make us read beyond
11132          * the end of the firmware data */
11133         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11134                 offset = be32_to_cpu(sections[i].offset);
11135                 len = be32_to_cpu(sections[i].len);
11136                 if (offset + len > firmware->size) {
11137                         printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11138                         return -EINVAL;
11139                 }
11140         }
11141
11142         /* Likewise for the init_ops offsets */
11143         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11144         ops_offsets = (u16 *)(firmware->data + offset);
11145         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11146
11147         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11148                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11149                         printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11150                         return -EINVAL;
11151                 }
11152         }
11153
11154         /* Check FW version */
11155         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11156         fw_ver = firmware->data + offset;
11157         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11158             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11159             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11160             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11161                 printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
11162                                     " Should be %d.%d.%d.%d\n",
11163                        fw_ver[0], fw_ver[1], fw_ver[2],
11164                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11165                        BCM_5710_FW_MINOR_VERSION,
11166                        BCM_5710_FW_REVISION_VERSION,
11167                        BCM_5710_FW_ENGINEERING_VERSION);
11168                 return -EINVAL;
11169         }
11170
11171         return 0;
11172 }
11173
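      /* Helpers that convert sections of the big-endian FW file image to
       * CPU byte order; n is the section length in bytes.
       */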
11174 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11175 {
11176         u32 i;
11177         const __be32 *source = (const __be32 *)_source;
11178         u32 *target = (u32 *)_target;
11179
11180         for (i = 0; i < n/4; i++)
11181                 target[i] = be32_to_cpu(source[i]);
11182 }
11183
11184 /*
11185    Ops array is stored in the following format:
11186    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11187  */
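      /* For example, a record whose two big-endian words read 0x10000204 and
       * 0x00000001 decodes to op = 0x10, offset = 0x000204 and
       * raw_data = 0x00000001 (illustrative values, not from a real FW file).
       */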
11188 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11189 {
11190         u32 i, j, tmp;
11191         const __be32 *source = (const __be32 *)_source;
11192         struct raw_op *target = (struct raw_op *)_target;
11193
11194         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11195                 tmp = be32_to_cpu(source[j]);
11196                 target[i].op = (tmp >> 24) & 0xff;
11197                 target[i].offset = tmp & 0xffffff;
11198                 target[i].raw_data = be32_to_cpu(source[j+1]);
11199         }
11200 }

11201 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11202 {
11203         u32 i;
11204         u16 *target = (u16 *)_target;
11205         const __be16 *source = (const __be16 *)_source;
11206
11207         for (i = 0; i < n/2; i++)
11208                 target[i] = be16_to_cpu(source[i]);
11209 }
11210
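      /* Allocate bp->arr for one FW file section and fill it from the image,
       * converting from big endian with 'func'; jumps to 'lbl' on allocation
       * failure so the caller can unwind earlier allocations.
       */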
11211 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11212         do {   \
11213                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11214                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11215                 if (!bp->arr) { \
11216                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11217                         goto lbl; \
11218                 } \
11219                 func(bp->firmware->data + \
11220                         be32_to_cpu(fw_hdr->arr.offset), \
11221                         (u8 *)bp->arr, len); \
11222         } while (0)
11223
11224
11225 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11226 {
11227         char fw_file_name[40] = {0};
11228         int rc, offset;
11229         struct bnx2x_fw_file_hdr *fw_hdr;
11230
11231         /* Create a FW file name */
11232         if (CHIP_IS_E1(bp))
11233                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11234         else
11235                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11236
11237         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11238                 BCM_5710_FW_MAJOR_VERSION,
11239                 BCM_5710_FW_MINOR_VERSION,
11240                 BCM_5710_FW_REVISION_VERSION,
11241                 BCM_5710_FW_ENGINEERING_VERSION);
11242
11243         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11244
11245         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11246         if (rc) {
11247                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11248                 goto request_firmware_exit;
11249         }
11250
11251         rc = bnx2x_check_firmware(bp);
11252         if (rc) {
11253                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11254                 goto request_firmware_exit;
11255         }
11256
11257         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11258
11259         /* Initialize the pointers to the init arrays */
11260         /* Blob */
11261         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11262
11263         /* Opcodes */
11264         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11265
11266         /* Offsets */
11267         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11268
11269         /* STORMs firmware */
11270         bp->tsem_int_table_data = bp->firmware->data +
11271                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11272         bp->tsem_pram_data      = bp->firmware->data +
11273                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11274         bp->usem_int_table_data = bp->firmware->data +
11275                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11276         bp->usem_pram_data      = bp->firmware->data +
11277                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11278         bp->xsem_int_table_data = bp->firmware->data +
11279                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11280         bp->xsem_pram_data      = bp->firmware->data +
11281                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11282         bp->csem_int_table_data = bp->firmware->data +
11283                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11284         bp->csem_pram_data      = bp->firmware->data +
11285                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11286
11287         return 0;
11288 init_offsets_alloc_err:
11289         kfree(bp->init_ops);
11290 init_ops_alloc_err:
11291         kfree(bp->init_data);
11292 request_firmware_exit:
11293         release_firmware(bp->firmware);
11294
11295         return rc;
11296 }
11297
11298
11299
11300 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11301                                     const struct pci_device_id *ent)
11302 {
11303         static int version_printed;
11304         struct net_device *dev = NULL;
11305         struct bnx2x *bp;
11306         int rc;
11307
11308         if (version_printed++ == 0)
11309                 printk(KERN_INFO "%s", version);
11310
11311         /* dev zeroed in alloc_etherdev_mq() */
11312         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11313         if (!dev) {
11314                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11315                 return -ENOMEM;
11316         }
11317
11318         bp = netdev_priv(dev);
11319         bp->msglevel = debug;
11320
11321         rc = bnx2x_init_dev(pdev, dev);
11322         if (rc < 0) {
11323                 free_netdev(dev);
11324                 return rc;
11325         }
11326
11327         pci_set_drvdata(pdev, dev);
11328
11329         rc = bnx2x_init_bp(bp);
11330         if (rc)
11331                 goto init_one_exit;
11332
11333         /* Set init arrays */
11334         rc = bnx2x_init_firmware(bp, &pdev->dev);
11335         if (rc) {
11336                 printk(KERN_ERR PFX "Error loading firmware\n");
11337                 goto init_one_exit;
11338         }
11339
11340         rc = register_netdev(dev);
11341         if (rc) {
11342                 dev_err(&pdev->dev, "Cannot register net device\n");
11343                 goto init_one_exit;
11344         }
11345
11346         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11347                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11348                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11349                bnx2x_get_pcie_width(bp),
11350                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11351                dev->base_addr, bp->pdev->irq);
11352         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11353
11354         return 0;
11355
11356 init_one_exit:
11357         if (bp->regview)
11358                 iounmap(bp->regview);
11359
11360         if (bp->doorbells)
11361                 iounmap(bp->doorbells);
11362
11363         free_netdev(dev);
11364
11365         if (atomic_read(&pdev->enable_cnt) == 1)
11366                 pci_release_regions(pdev);
11367
11368         pci_disable_device(pdev);
11369         pci_set_drvdata(pdev, NULL);
11370
11371         return rc;
11372 }
11373
11374 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11375 {
11376         struct net_device *dev = pci_get_drvdata(pdev);
11377         struct bnx2x *bp;
11378
11379         if (!dev) {
11380                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11381                 return;
11382         }
11383         bp = netdev_priv(dev);
11384
11385         unregister_netdev(dev);
11386
11387         kfree(bp->init_ops_offsets);
11388         kfree(bp->init_ops);
11389         kfree(bp->init_data);
11390         release_firmware(bp->firmware);
11391
11392         if (bp->regview)
11393                 iounmap(bp->regview);
11394
11395         if (bp->doorbells)
11396                 iounmap(bp->doorbells);
11397
11398         free_netdev(dev);
11399
11400         if (atomic_read(&pdev->enable_cnt) == 1)
11401                 pci_release_regions(pdev);
11402
11403         pci_disable_device(pdev);
11404         pci_set_drvdata(pdev, NULL);
11405 }
11406
11407 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11408 {
11409         struct net_device *dev = pci_get_drvdata(pdev);
11410         struct bnx2x *bp;
11411
11412         if (!dev) {
11413                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11414                 return -ENODEV;
11415         }
11416         bp = netdev_priv(dev);
11417
11418         rtnl_lock();
11419
11420         pci_save_state(pdev);
11421
11422         if (!netif_running(dev)) {
11423                 rtnl_unlock();
11424                 return 0;
11425         }
11426
11427         netif_device_detach(dev);
11428
11429         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11430
11431         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11432
11433         rtnl_unlock();
11434
11435         return 0;
11436 }
11437
11438 static int bnx2x_resume(struct pci_dev *pdev)
11439 {
11440         struct net_device *dev = pci_get_drvdata(pdev);
11441         struct bnx2x *bp;
11442         int rc;
11443
11444         if (!dev) {
11445                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11446                 return -ENODEV;
11447         }
11448         bp = netdev_priv(dev);
11449
11450         rtnl_lock();
11451
11452         pci_restore_state(pdev);
11453
11454         if (!netif_running(dev)) {
11455                 rtnl_unlock();
11456                 return 0;
11457         }
11458
11459         bnx2x_set_power_state(bp, PCI_D0);
11460         netif_device_attach(dev);
11461
11462         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11463
11464         rtnl_unlock();
11465
11466         return rc;
11467 }
11468
11469 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11470 {
11471         int i;
11472
11473         bp->state = BNX2X_STATE_ERROR;
11474
11475         bp->rx_mode = BNX2X_RX_MODE_NONE;
11476
11477         bnx2x_netif_stop(bp, 0);
11478
11479         del_timer_sync(&bp->timer);
11480         bp->stats_state = STATS_STATE_DISABLED;
11481         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11482
11483         /* Release IRQs */
11484         bnx2x_free_irq(bp);
11485
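              /* On E1, invalidate any multicast CAM entries left over from
               * the last configuration.
               */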
11486         if (CHIP_IS_E1(bp)) {
11487                 struct mac_configuration_cmd *config =
11488                                                 bnx2x_sp(bp, mcast_config);
11489
11490                 for (i = 0; i < config->hdr.length; i++)
11491                         CAM_INVALIDATE(config->config_table[i]);
11492         }
11493
11494         /* Free SKBs, SGEs, TPA pool and driver internals */
11495         bnx2x_free_skbs(bp);
11496         for_each_rx_queue(bp, i)
11497                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11498         for_each_rx_queue(bp, i)
11499                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11500         bnx2x_free_mem(bp);
11501
11502         bp->state = BNX2X_STATE_CLOSED;
11503
11504         netif_carrier_off(bp->dev);
11505
11506         return 0;
11507 }
11508
11509 static void bnx2x_eeh_recover(struct bnx2x *bp)
11510 {
11511         u32 val;
11512
11513         mutex_init(&bp->port.phy_mutex);
11514
11515         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11516         bp->link_params.shmem_base = bp->common.shmem_base;
11517         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11518
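              /* A shmem base of zero or outside the expected window means the
               * bootcode (MCP) presumably never initialized it; run with MCP
               * support disabled.
               */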
11519         if (!bp->common.shmem_base ||
11520             (bp->common.shmem_base < 0xA0000) ||
11521             (bp->common.shmem_base >= 0xC0000)) {
11522                 BNX2X_DEV_INFO("MCP not active\n");
11523                 bp->flags |= NO_MCP_FLAG;
11524                 return;
11525         }
11526
11527         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11528         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11529                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11530                 BNX2X_ERR("BAD MCP validity signature\n");
11531
11532         if (!BP_NOMCP(bp)) {
11533                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11534                               & DRV_MSG_SEQ_NUMBER_MASK);
11535                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11536         }
11537 }
11538
11539 /**
11540  * bnx2x_io_error_detected - called when PCI error is detected
11541  * @pdev: Pointer to PCI device
11542  * @state: The current pci connection state
11543  *
11544  * This function is called after a PCI bus error affecting
11545  * this device has been detected.
11546  */
11547 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11548                                                 pci_channel_state_t state)
11549 {
11550         struct net_device *dev = pci_get_drvdata(pdev);
11551         struct bnx2x *bp = netdev_priv(dev);
11552
11553         rtnl_lock();
11554
11555         netif_device_detach(dev);
11556
11557         if (netif_running(dev))
11558                 bnx2x_eeh_nic_unload(bp);
11559
11560         pci_disable_device(pdev);
11561
11562         rtnl_unlock();
11563
11564         /* Request a slot reset */
11565         return PCI_ERS_RESULT_NEED_RESET;
11566 }
11567
11568 /**
11569  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11570  * @pdev: Pointer to PCI device
11571  *
11572  * Restart the card from scratch, as if from a cold boot.
11573  */
11574 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11575 {
11576         struct net_device *dev = pci_get_drvdata(pdev);
11577         struct bnx2x *bp = netdev_priv(dev);
11578
11579         rtnl_lock();
11580
11581         if (pci_enable_device(pdev)) {
11582                 dev_err(&pdev->dev,
11583                         "Cannot re-enable PCI device after reset\n");
11584                 rtnl_unlock();
11585                 return PCI_ERS_RESULT_DISCONNECT;
11586         }
11587
11588         pci_set_master(pdev);
11589         pci_restore_state(pdev);
11590
11591         if (netif_running(dev))
11592                 bnx2x_set_power_state(bp, PCI_D0);
11593
11594         rtnl_unlock();
11595
11596         return PCI_ERS_RESULT_RECOVERED;
11597 }
11598
11599 /**
11600  * bnx2x_io_resume - called when traffic can start flowing again
11601  * @pdev: Pointer to PCI device
11602  *
11603  * This callback is called when the error recovery driver tells us that
11604  * it's OK to resume normal operation.
11605  */
11606 static void bnx2x_io_resume(struct pci_dev *pdev)
11607 {
11608         struct net_device *dev = pci_get_drvdata(pdev);
11609         struct bnx2x *bp = netdev_priv(dev);
11610
11611         rtnl_lock();
11612
11613         bnx2x_eeh_recover(bp);
11614
11615         if (netif_running(dev))
11616                 bnx2x_nic_load(bp, LOAD_NORMAL);
11617
11618         netif_device_attach(dev);
11619
11620         rtnl_unlock();
11621 }
11622
11623 static struct pci_error_handlers bnx2x_err_handler = {
11624         .error_detected = bnx2x_io_error_detected,
11625         .slot_reset     = bnx2x_io_slot_reset,
11626         .resume         = bnx2x_io_resume,
11627 };
11628
11629 static struct pci_driver bnx2x_pci_driver = {
11630         .name        = DRV_MODULE_NAME,
11631         .id_table    = bnx2x_pci_tbl,
11632         .probe       = bnx2x_init_one,
11633         .remove      = __devexit_p(bnx2x_remove_one),
11634         .suspend     = bnx2x_suspend,
11635         .resume      = bnx2x_resume,
11636         .err_handler = &bnx2x_err_handler,
11637 };
11638
11639 static int __init bnx2x_init(void)
11640 {
11641         int ret;
11642
11643         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11644         if (bnx2x_wq == NULL) {
11645                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11646                 return -ENOMEM;
11647         }
11648
11649         ret = pci_register_driver(&bnx2x_pci_driver);
11650         if (ret) {
11651                 printk(KERN_ERR PFX "Cannot register driver\n");
11652                 destroy_workqueue(bnx2x_wq);
11653         }
11654         return ret;
11655 }
11656
11657 static void __exit bnx2x_cleanup(void)
11658 {
11659         pci_unregister_driver(&bnx2x_pci_driver);
11660
11661         destroy_workqueue(bnx2x_wq);
11662 }
11663
11664 module_init(bnx2x_init);
11665 module_exit(bnx2x_cleanup);
11666
11667