/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.105-1"
#define DRV_MODULE_RELDATE      "2009/04/22"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

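/* Indirect (GRC window) register access: the target address is written
 * to PCICFG_GRC_ADDRESS, the data moves through PCICFG_GRC_DATA, and
 * the window is then restored to PCICFG_VENDOR_ID_OFFSET so that later
 * config cycles are not routed through it.
 */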
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

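/* One "go" register per DMAE command channel; writing 1 to
 * dmae_reg_go_c[idx] kicks the command that was copied into channel
 * idx's slot in DMAE command memory (see bnx2x_post_dmae() below).
 */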
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

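/* DMA len32 dwords from host memory at dma_addr to device address
 * dst_addr.  Completion is detected by polling the slowpath wb_comp
 * word for DMAE_COMP_VAL; if DMAE is not ready yet (early init), fall
 * back to indirect register writes.
 */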
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

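/* Counterpart of bnx2x_write_dmae(): DMA len32 dwords from device
 * address src_addr into the slowpath wb_data buffer, again polling
 * wb_comp for completion and falling back to indirect reads when DMAE
 * is not ready.
 */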
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

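/* Scan the assert lists of the X/T/C/U storm processors and print any
 * recorded asserts; returns the number of asserts found.
 */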
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

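/* Print the MCP firmware trace from the scratchpad: the word at offset
 * 0xf104 holds the current mark, and the buffer is dumped from the
 * mark to its end and then from its start back up to the mark.
 */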
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

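/* Dump driver and ring state for post-mortem analysis: status block
 * indices, the Rx/Tx rings around the current consumers, the firmware
 * trace and any storm asserts.  Statistics are disabled first.
 */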
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

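/* Program the HC configuration register for the active interrupt mode
 * (MSI-X, MSI or INTx) and, on E1H, set up the leading/trailing edge
 * registers that control attention generation.
 */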
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

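/* Block interrupt handling (and optionally mask the HW sources), then
 * wait until all ISRs and the slowpath task have finished so no
 * handler is still running when the caller proceeds.
 */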
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

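/* Acknowledge a status block: write an igu_ack_register dword to the
 * HC command register, updating the SB index for the given storm and
 * optionally re-enabling interrupts (op/update fields).
 */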
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

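/* Number of free Tx BDs.  The "next-page" BDs (NUM_TX_RINGS of them,
 * one per ring page) are counted as used, which effectively serves as
 * a safety threshold.
 */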
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

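/* Tx completion: walk the ring from the driver's consumer up to the
 * consumer reported in the status block, freeing each sent packet, and
 * wake the netdev queue if it was stopped and enough BDs are free.
 */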
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

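/* Advance the SGE producer after an aggregated packet: clear the mask
 * bits of the pages consumed by this CQE, then push the producer over
 * any mask elements that became fully consumed.
 */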
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

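/* Start of a TPA aggregation: the empty skb parked in the per-queue
 * pool is mapped and placed at the ring producer, while the skb at the
 * consumer (holding the first part of the aggregation) moves into the
 * pool until bnx2x_tpa_stop() completes it.
 */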
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

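/* Attach the SGE pages of an aggregated packet to the skb as page
 * frags, replacing each consumed page in the ring.  If a substitute
 * page cannot be allocated, the packet is dropped and the error is
 * returned to the caller.
 */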
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

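/* End of a TPA aggregation: fix up the aggregated skb (IP checksum,
 * optional VLAN tag) and hand it to the stack, then park a freshly
 * allocated skb in the pool.  On allocation failure the packet is
 * dropped and the old buffer stays in the bin.
 */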
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

1427         /*
1428          * Make sure that the BD and SGE data is updated before updating the
1429          * producers since FW might read the BD/SGE right after the producer
1430          * is updated.
1431          * This is only applicable for weak-ordered memory model archs such
1432          * as IA-64. The following barrier is also mandatory since the FW
1433          * assumes BDs must have buffers.
1434          */
1435         wmb();
1436
1437         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1438                 REG_WR(bp, BAR_USTRORM_INTMEM +
1439                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1440                        ((u32 *)&rx_prods)[i]);
1441
1442         mmiowb(); /* keep prod updates ordered */
1443
1444         DP(NETIF_MSG_RX_STATUS,
1445            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1446            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1447 }
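
/*
 * Editorial note on the loop above (hedged): the rx_prods struct is
 * mirrored into ustorm internal memory one 32-bit word at a time, so
 * assuming an 8-byte struct the loop issues two writes, roughly:
 *
 *     REG_WR(bp, base + 0, ((u32 *)&rx_prods)[0]);   bd_prod, cqe_prod
 *     REG_WR(bp, base + 4, ((u32 *)&rx_prods)[1]);   sge_prod, padding
 *
 * The wmb() orders the BD/SGE writes before the producer update, and
 * mmiowb() keeps the posted MMIO writes ordered with later ones.
 */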
1448
1449 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1450 {
1451         struct bnx2x *bp = fp->bp;
1452         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1453         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1454         int rx_pkt = 0;
1455
1456 #ifdef BNX2X_STOP_ON_ERROR
1457         if (unlikely(bp->panic))
1458                 return 0;
1459 #endif
1460
1461         /* the CQ "next element" is the same size as a regular element,
1462            which is why it is safe to treat it uniformly here */
1463         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1464         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1465                 hw_comp_cons++;
1466
1467         bd_cons = fp->rx_bd_cons;
1468         bd_prod = fp->rx_bd_prod;
1469         bd_prod_fw = bd_prod;
1470         sw_comp_cons = fp->rx_comp_cons;
1471         sw_comp_prod = fp->rx_comp_prod;
1472
1473         /* Memory barrier necessary as speculative reads of the rx
1474          * buffer can be ahead of the index in the status block
1475          */
1476         rmb();
1477
1478         DP(NETIF_MSG_RX_STATUS,
1479            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1480            fp->index, hw_comp_cons, sw_comp_cons);
1481
1482         while (sw_comp_cons != hw_comp_cons) {
1483                 struct sw_rx_bd *rx_buf = NULL;
1484                 struct sk_buff *skb;
1485                 union eth_rx_cqe *cqe;
1486                 u8 cqe_fp_flags;
1487                 u16 len, pad;
1488
1489                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1490                 bd_prod = RX_BD(bd_prod);
1491                 bd_cons = RX_BD(bd_cons);
1492
1493                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1494                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1495
1496                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1497                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1498                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1499                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1500                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1501                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1502
1503                 /* is this a slowpath msg? */
1504                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1505                         bnx2x_sp_event(fp, cqe);
1506                         goto next_cqe;
1507
1508                 /* this is an rx packet */
1509                 } else {
1510                         rx_buf = &fp->rx_buf_ring[bd_cons];
1511                         skb = rx_buf->skb;
1512                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1513                         pad = cqe->fast_path_cqe.placement_offset;
1514
1515                         /* If the CQE is marked with both TPA_START and
1516                            TPA_END, it is a non-TPA CQE */
1517                         if ((!fp->disable_tpa) &&
1518                             (TPA_TYPE(cqe_fp_flags) !=
1519                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1520                                 u16 queue = cqe->fast_path_cqe.queue_index;
1521
1522                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1523                                         DP(NETIF_MSG_RX_STATUS,
1524                                            "calling tpa_start on queue %d\n",
1525                                            queue);
1526
1527                                         bnx2x_tpa_start(fp, queue, skb,
1528                                                         bd_cons, bd_prod);
1529                                         goto next_rx;
1530                                 }
1531
1532                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1533                                         DP(NETIF_MSG_RX_STATUS,
1534                                            "calling tpa_stop on queue %d\n",
1535                                            queue);
1536
1537                                         if (!BNX2X_RX_SUM_FIX(cqe))
1538                                                 BNX2X_ERR("STOP on non-TCP "
1539                                                           "data\n");
1540
1541                                         /* This is the size of the linear
1542                                            data on this skb */
1543                                         len = le16_to_cpu(cqe->fast_path_cqe.
1544                                                                 len_on_bd);
1545                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1546                                                     len, cqe, comp_ring_cons);
1547 #ifdef BNX2X_STOP_ON_ERROR
1548                                         if (bp->panic)
1549                                                 return 0;
1550 #endif
1551
1552                                         bnx2x_update_sge_prod(fp,
1553                                                         &cqe->fast_path_cqe);
1554                                         goto next_cqe;
1555                                 }
1556                         }
1557
1558                         pci_dma_sync_single_for_device(bp->pdev,
1559                                         pci_unmap_addr(rx_buf, mapping),
1560                                                        pad + RX_COPY_THRESH,
1561                                                        PCI_DMA_FROMDEVICE);
1562                         prefetch(skb);
1563                         prefetch(((char *)(skb)) + 128);
1564
1565                         /* is this an error packet? */
1566                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1567                                 DP(NETIF_MSG_RX_ERR,
1568                                    "ERROR  flags %x  rx packet %u\n",
1569                                    cqe_fp_flags, sw_comp_cons);
1570                                 fp->eth_q_stats.rx_err_discard_pkt++;
1571                                 goto reuse_rx;
1572                         }
1573
1574                         /* Since we don't have a jumbo ring,
1575                          * copy small packets if mtu > 1500
1576                          */
1577                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1578                             (len <= RX_COPY_THRESH)) {
1579                                 struct sk_buff *new_skb;
1580
1581                                 new_skb = netdev_alloc_skb(bp->dev,
1582                                                            len + pad);
1583                                 if (new_skb == NULL) {
1584                                         DP(NETIF_MSG_RX_ERR,
1585                                            "ERROR  packet dropped "
1586                                            "because of alloc failure\n");
1587                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1588                                         goto reuse_rx;
1589                                 }
1590
1591                                 /* aligned copy */
1592                                 skb_copy_from_linear_data_offset(skb, pad,
1593                                                     new_skb->data + pad, len);
1594                                 skb_reserve(new_skb, pad);
1595                                 skb_put(new_skb, len);
1596
1597                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1598
1599                                 skb = new_skb;
1600
1601                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1602                                 pci_unmap_single(bp->pdev,
1603                                         pci_unmap_addr(rx_buf, mapping),
1604                                                  bp->rx_buf_size,
1605                                                  PCI_DMA_FROMDEVICE);
1606                                 skb_reserve(skb, pad);
1607                                 skb_put(skb, len);
1608
1609                         } else {
1610                                 DP(NETIF_MSG_RX_ERR,
1611                                    "ERROR  packet dropped because "
1612                                    "of alloc failure\n");
1613                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1614 reuse_rx:
1615                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1616                                 goto next_rx;
1617                         }
1618
1619                         skb->protocol = eth_type_trans(skb, bp->dev);
1620
1621                         skb->ip_summed = CHECKSUM_NONE;
1622                         if (bp->rx_csum) {
1623                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1624                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1625                                 else
1626                                         fp->eth_q_stats.hw_csum_err++;
1627                         }
1628                 }
1629
1630                 skb_record_rx_queue(skb, fp->index);
1631 #ifdef BCM_VLAN
1632                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1633                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1634                      PARSING_FLAGS_VLAN))
1635                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1636                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1637                 else
1638 #endif
1639                         netif_receive_skb(skb);
1640
1641
1642 next_rx:
1643                 rx_buf->skb = NULL;
1644
1645                 bd_cons = NEXT_RX_IDX(bd_cons);
1646                 bd_prod = NEXT_RX_IDX(bd_prod);
1647                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1648                 rx_pkt++;
1649 next_cqe:
1650                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1651                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1652
1653                 if (rx_pkt == budget)
1654                         break;
1655         } /* while */
1656
1657         fp->rx_bd_cons = bd_cons;
1658         fp->rx_bd_prod = bd_prod_fw;
1659         fp->rx_comp_cons = sw_comp_cons;
1660         fp->rx_comp_prod = sw_comp_prod;
1661
1662         /* Update producers */
1663         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1664                              fp->rx_sge_prod);
1665
1666         fp->rx_pkt += rx_pkt;
1667         fp->rx_calls++;
1668
1669         return rx_pkt;
1670 }
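
/*
 * Hedged note on the ring arithmetic in bnx2x_rx_int(): the RX BD ring
 * and the RCQ reserve the tail of each page for a "next page" pointer,
 * which is why NEXT_RX_IDX()/NEXT_RCQ_IDX() are used instead of plain
 * increments, and why
 *
 *     if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
 *             hw_comp_cons++;
 *
 * at the top bumps the HW consumer past the reserved element before the
 * sw/hw consumer comparison.
 */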
1671
1672 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1673 {
1674         struct bnx2x_fastpath *fp = fp_cookie;
1675         struct bnx2x *bp = fp->bp;
1676         int index = fp->index;
1677
1678         /* Return here if interrupt is disabled */
1679         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1680                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1681                 return IRQ_HANDLED;
1682         }
1683
1684         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1685            index, fp->sb_id);
1686         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1687
1688 #ifdef BNX2X_STOP_ON_ERROR
1689         if (unlikely(bp->panic))
1690                 return IRQ_HANDLED;
1691 #endif
1692
1693         prefetch(fp->rx_cons_sb);
1694         prefetch(fp->tx_cons_sb);
1695         prefetch(&fp->status_blk->c_status_block.status_block_index);
1696         prefetch(&fp->status_blk->u_status_block.status_block_index);
1697
1698         napi_schedule(&bnx2x_fp(bp, index, napi));
1699
1700         return IRQ_HANDLED;
1701 }
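
/*
 * Hedged note: the MSI-X fastpath handler above ACKs its status block
 * with IGU_INT_DISABLE and defers all work to NAPI; the matching
 * IGU_INT_ENABLE ack is expected to be issued by the NAPI poll routine
 * (outside this excerpt) once the queue is drained or the budget spent.
 */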
1702
1703 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1704 {
1705         struct bnx2x *bp = netdev_priv(dev_instance);
1706         u16 status = bnx2x_ack_int(bp);
1707         u16 mask;
1708
1709         /* Return here if interrupt is shared and it's not for us */
1710         if (unlikely(status == 0)) {
1711                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1712                 return IRQ_NONE;
1713         }
1714         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1715
1716         /* Return here if interrupt is disabled */
1717         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1718                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1719                 return IRQ_HANDLED;
1720         }
1721
1722 #ifdef BNX2X_STOP_ON_ERROR
1723         if (unlikely(bp->panic))
1724                 return IRQ_HANDLED;
1725 #endif
1726
1727         mask = 0x2 << bp->fp[0].sb_id;
1728         if (status & mask) {
1729                 struct bnx2x_fastpath *fp = &bp->fp[0];
1730
1731                 prefetch(fp->rx_cons_sb);
1732                 prefetch(fp->tx_cons_sb);
1733                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1734                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1735
1736                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1737
1738                 status &= ~mask;
1739         }
1740
1741
1742         if (unlikely(status & 0x1)) {
1743                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1744
1745                 status &= ~0x1;
1746                 if (!status)
1747                         return IRQ_HANDLED;
1748         }
1749
1750         if (status)
1751                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1752                    status);
1753
1754         return IRQ_HANDLED;
1755 }
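
/*
 * Hedged note on the INTA status decoding above: bit 0 belongs to the
 * default (slowpath) status block and is handed to the sp_task
 * workqueue, while the fastpath status block handled here contributes
 * bit (0x2 << sb_id), e.g. for sb_id == 0:
 *
 *     mask = 0x2 << 0;        0x00000002, schedule NAPI for queue 0
 *
 * Any bits still set afterwards are logged as an unknown interrupt.
 */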
1756
1757 /* end of fast path */
1758
1759 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1760
1761 /* Link */
1762
1763 /*
1764  * General service functions
1765  */
1766
1767 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1768 {
1769         u32 lock_status;
1770         u32 resource_bit = (1 << resource);
1771         int func = BP_FUNC(bp);
1772         u32 hw_lock_control_reg;
1773         int cnt;
1774
1775         /* Validating that the resource is within range */
1776         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1777                 DP(NETIF_MSG_HW,
1778                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1779                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1780                 return -EINVAL;
1781         }
1782
1783         if (func <= 5) {
1784                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1785         } else {
1786                 hw_lock_control_reg =
1787                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1788         }
1789
1790         /* Validating that the resource is not already taken */
1791         lock_status = REG_RD(bp, hw_lock_control_reg);
1792         if (lock_status & resource_bit) {
1793                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1794                    lock_status, resource_bit);
1795                 return -EEXIST;
1796         }
1797
1798         /* Try for 5 seconds, polling every 5ms */
1799         for (cnt = 0; cnt < 1000; cnt++) {
1800                 /* Try to acquire the lock */
1801                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1802                 lock_status = REG_RD(bp, hw_lock_control_reg);
1803                 if (lock_status & resource_bit)
1804                         return 0;
1805
1806                 msleep(5);
1807         }
1808         DP(NETIF_MSG_HW, "Timeout\n");
1809         return -EAGAIN;
1810 }
1811
1812 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1813 {
1814         u32 lock_status;
1815         u32 resource_bit = (1 << resource);
1816         int func = BP_FUNC(bp);
1817         u32 hw_lock_control_reg;
1818
1819         /* Validating that the resource is within range */
1820         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1821                 DP(NETIF_MSG_HW,
1822                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1823                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1824                 return -EINVAL;
1825         }
1826
1827         if (func <= 5) {
1828                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1829         } else {
1830                 hw_lock_control_reg =
1831                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1832         }
1833
1834         /* Validating that the resource is currently taken */
1835         lock_status = REG_RD(bp, hw_lock_control_reg);
1836         if (!(lock_status & resource_bit)) {
1837                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1838                    lock_status, resource_bit);
1839                 return -EFAULT;
1840         }
1841
1842         REG_WR(bp, hw_lock_control_reg, resource_bit);
1843         return 0;
1844 }
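
/*
 * A minimal usage sketch for the lock pair above (hedged; the resource
 * id is the one the GPIO helpers below actually use):
 *
 *     if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
 *             return;         returns -EINVAL/-EEXIST/-EAGAIN on failure
 *     ... access the shared resource ...
 *     bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * The lock is a per-resource bit in MISC_REG_DRIVER_CONTROL_N: writing
 * the bit to reg + 4 attempts the acquire, writing it to the register
 * itself releases it.
 */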
1845
1846 /* HW Lock for shared dual port PHYs */
1847 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1848 {
1849         mutex_lock(&bp->port.phy_mutex);
1850
1851         if (bp->port.need_hw_lock)
1852                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1853 }
1854
1855 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1856 {
1857         if (bp->port.need_hw_lock)
1858                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1859
1860         mutex_unlock(&bp->port.phy_mutex);
1861 }
1862
1863 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1864 {
1865         /* The GPIO should be swapped if the swap register is set and active */
1866         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1867                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1868         int gpio_shift = gpio_num +
1869                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1870         u32 gpio_mask = (1 << gpio_shift);
1871         u32 gpio_reg;
1872         int value;
1873
1874         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1875                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1876                 return -EINVAL;
1877         }
1878
1879         /* read GPIO value */
1880         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1881
1882         /* get the requested pin value */
1883         if ((gpio_reg & gpio_mask) == gpio_mask)
1884                 value = 1;
1885         else
1886                 value = 0;
1887
1888         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1889
1890         return value;
1891 }
1892
1893 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1894 {
1895         /* The GPIO should be swapped if the swap register is set and active */
1896         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1897                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1898         int gpio_shift = gpio_num +
1899                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1900         u32 gpio_mask = (1 << gpio_shift);
1901         u32 gpio_reg;
1902
1903         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1904                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1905                 return -EINVAL;
1906         }
1907
1908         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1909         /* read GPIO, masking out all bits except the float bits */
1910         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1911
1912         switch (mode) {
1913         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1914                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1915                    gpio_num, gpio_shift);
1916                 /* clear FLOAT and set CLR */
1917                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1918                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1919                 break;
1920
1921         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1922                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1923                    gpio_num, gpio_shift);
1924                 /* clear FLOAT and set SET */
1925                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1926                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1927                 break;
1928
1929         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1930                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1931                    gpio_num, gpio_shift);
1932                 /* set FLOAT */
1933                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1934                 break;
1935
1936         default:
1937                 break;
1938         }
1939
1940         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1941         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1942
1943         return 0;
1944 }
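
/*
 * Example grounded in this file: the SPIO5/fan-failure handler further
 * below drives the PHY reset pin with this helper:
 *
 *     bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                    MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */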
1945
1946 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1947 {
1948         /* The GPIO should be swapped if the swap register is set and active */
1949         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1950                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1951         int gpio_shift = gpio_num +
1952                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1953         u32 gpio_mask = (1 << gpio_shift);
1954         u32 gpio_reg;
1955
1956         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1957                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1958                 return -EINVAL;
1959         }
1960
1961         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1962         /* read GPIO int */
1963         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1964
1965         switch (mode) {
1966         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1967                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1968                                    "output low\n", gpio_num, gpio_shift);
1969                 /* clear SET and set CLR */
1970                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1971                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972                 break;
1973
1974         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1975                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1976                                    "output high\n", gpio_num, gpio_shift);
1977                 /* clear CLR and set SET */
1978                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1979                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1980                 break;
1981
1982         default:
1983                 break;
1984         }
1985
1986         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1987         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1988
1989         return 0;
1990 }
1991
1992 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1993 {
1994         u32 spio_mask = (1 << spio_num);
1995         u32 spio_reg;
1996
1997         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1998             (spio_num > MISC_REGISTERS_SPIO_7)) {
1999                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2004         /* read SPIO, masking out all bits except the float bits */
2005         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2009                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2010                 /* clear FLOAT and set CLR */
2011                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2016                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2017                 /* clear FLOAT and set SET */
2018                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2020                 break;
2021
2022         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2023                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2024                 /* set FLOAT */
2025                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026                 break;
2027
2028         default:
2029                 break;
2030         }
2031
2032         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2033         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2034
2035         return 0;
2036 }
2037
2038 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2039 {
2040         switch (bp->link_vars.ieee_fc &
2041                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2042         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2043                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2044                                           ADVERTISED_Pause);
2045                 break;
2046
2047         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2048                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2049                                          ADVERTISED_Pause);
2050                 break;
2051
2052         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2053                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2054                 break;
2055
2056         default:
2057                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058                                           ADVERTISED_Pause);
2059                 break;
2060         }
2061 }
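
/*
 * Summary of the IEEE pause -> advertising mapping implemented above:
 *
 *     PAUSE_NONE       -> clear ADVERTISED_Pause and ADVERTISED_Asym_Pause
 *     PAUSE_BOTH       -> set both
 *     PAUSE_ASYMMETRIC -> set ADVERTISED_Asym_Pause only
 *     anything else    -> clear both (same as NONE)
 */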
2062
2063 static void bnx2x_link_report(struct bnx2x *bp)
2064 {
2065         if (bp->link_vars.link_up) {
2066                 if (bp->state == BNX2X_STATE_OPEN)
2067                         netif_carrier_on(bp->dev);
2068                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2069
2070                 printk("%d Mbps ", bp->link_vars.line_speed);
2071
2072                 if (bp->link_vars.duplex == DUPLEX_FULL)
2073                         printk("full duplex");
2074                 else
2075                         printk("half duplex");
2076
2077                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2078                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2079                                 printk(", receive ");
2080                                 if (bp->link_vars.flow_ctrl &
2081                                     BNX2X_FLOW_CTRL_TX)
2082                                         printk("& transmit ");
2083                         } else {
2084                                 printk(", transmit ");
2085                         }
2086                         printk("flow control ON");
2087                 }
2088                 printk("\n");
2089
2090         } else { /* link_down */
2091                 netif_carrier_off(bp->dev);
2092                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2093         }
2094 }
2095
2096 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2097 {
2098         if (!BP_NOMCP(bp)) {
2099                 u8 rc;
2100
2101                 /* Initialize the link parameters structure */
2102                 /* It is recommended to turn off RX FC for jumbo frames
2103                    for better performance, hence TX-only FC for MTU > 5000 */
2104                 if (IS_E1HMF(bp))
2105                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2106                 else if (bp->dev->mtu > 5000)
2107                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2108                 else
2109                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2110
2111                 bnx2x_acquire_phy_lock(bp);
2112
2113                 if (load_mode == LOAD_DIAG)
2114                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2115
2116                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2117
2118                 bnx2x_release_phy_lock(bp);
2119
2120                 bnx2x_calc_fc_adv(bp);
2121
2122                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2123                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2124                         bnx2x_link_report(bp);
2125                 }
2126
2127                 return rc;
2128         }
2129         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2130         return -EINVAL;
2131 }
2132
2133 static void bnx2x_link_set(struct bnx2x *bp)
2134 {
2135         if (!BP_NOMCP(bp)) {
2136                 bnx2x_acquire_phy_lock(bp);
2137                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2138                 bnx2x_release_phy_lock(bp);
2139
2140                 bnx2x_calc_fc_adv(bp);
2141         } else
2142                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2143 }
2144
2145 static void bnx2x__link_reset(struct bnx2x *bp)
2146 {
2147         if (!BP_NOMCP(bp)) {
2148                 bnx2x_acquire_phy_lock(bp);
2149                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2150                 bnx2x_release_phy_lock(bp);
2151         } else
2152                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2153 }
2154
2155 static u8 bnx2x_link_test(struct bnx2x *bp)
2156 {
2157         u8 rc;
2158
2159         bnx2x_acquire_phy_lock(bp);
2160         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2161         bnx2x_release_phy_lock(bp);
2162
2163         return rc;
2164 }
2165
2166 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2167 {
2168         u32 r_param = bp->link_vars.line_speed / 8;
2169         u32 fair_periodic_timeout_usec;
2170         u32 t_fair;
2171
2172         memset(&(bp->cmng.rs_vars), 0,
2173                sizeof(struct rate_shaping_vars_per_port));
2174         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2175
2176         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2177         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2178
2179         /* this is the threshold below which no timer arming will occur.
2180            The 1.25 coefficient makes the threshold a little bigger than
2181            the real time, to compensate for timer inaccuracy */
2182         bp->cmng.rs_vars.rs_threshold =
2183                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2184
2185         /* resolution of fairness timer */
2186         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2187         /* for 10G this is 1000 usec; for 1G it is 10000 usec */
2188         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2189
2190         /* this is the threshold below which we won't arm the timer anymore */
2191         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2192
2193         /* we multiply by 1e3/8 to get bytes/msec.
2194            The accumulated credit must not exceed
2195            t_fair*FAIR_MEM (the algorithm resolution) */
2196         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2197         /* since each tick is 4 usec */
2198         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2199 }
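
/*
 * Worked example for the math above (hedged; assumes a 10G link, i.e.
 * line_speed == 10000 Mbps, and RS_PERIODIC_TIMEOUT_USEC == 100 per the
 * "100 usec" comment):
 *
 *     r_param      = 10000 / 8           = 1250 bytes/usec
 *     rs_threshold = 100 * 1250 * 5 / 4  = 156250 bytes
 *     t_fair       = T_FAIR_COEF / 10000 = 1000 usec
 *
 * giving a fairness credit upper bound of r_param * t_fair * FAIR_MEM.
 */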
2200
2201 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2202 {
2203         struct rate_shaping_vars_per_vn m_rs_vn;
2204         struct fairness_vars_per_vn m_fair_vn;
2205         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2206         u16 vn_min_rate, vn_max_rate;
2207         int i;
2208
2209         /* If function is hidden - set min and max to zeroes */
2210         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2211                 vn_min_rate = 0;
2212                 vn_max_rate = 0;
2213
2214         } else {
2215                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2216                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2217                 /* If fairness is enabled (not all min rates are zero)
2218                    and the current min rate is zero, set it to 1.
2219                    This is a requirement of the algorithm. */
2220                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2221                         vn_min_rate = DEF_MIN_RATE;
2222                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2223                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2224         }
2225
2226         DP(NETIF_MSG_IFUP,
2227            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2228            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2229
2230         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2231         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2232
2233         /* global vn counter - maximal Mbps for this vn */
2234         m_rs_vn.vn_counter.rate = vn_max_rate;
2235
2236         /* quota - number of bytes transmitted in this period */
2237         m_rs_vn.vn_counter.quota =
2238                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2239
2240         if (bp->vn_weight_sum) {
2241                 /* credit for each period of the fairness algorithm:
2242                    number of bytes in T_FAIR (the VNs share the port rate).
2243                    vn_weight_sum should not be larger than 10000, thus
2244                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2245                    than zero */
2246                 m_fair_vn.vn_credit_delta =
2247                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2248                                                  (8 * bp->vn_weight_sum))),
2249                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2250                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2251                    m_fair_vn.vn_credit_delta);
2252         }
2253
2254         /* Store it to internal memory */
2255         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2256                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2257                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2258                        ((u32 *)(&m_rs_vn))[i]);
2259
2260         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2261                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2262                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2263                        ((u32 *)(&m_fair_vn))[i]);
2264 }
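
/*
 * Hedged note on the decoding above: the MIN_BW/MAX_BW config fields
 * appear to be kept in units of 100 Mbps, so a raw field value of 25
 * yields
 *
 *     vn_min_rate = 25 * 100 = 2500 Mbps
 *
 * and a zero min rate is promoted to DEF_MIN_RATE whenever fairness is
 * active (vn_weight_sum != 0).
 */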
2265
2266
2267 /* This function is called upon link interrupt */
2268 static void bnx2x_link_attn(struct bnx2x *bp)
2269 {
2270         /* Make sure that we are synced with the current statistics */
2271         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2272
2273         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2274
2275         if (bp->link_vars.link_up) {
2276
2277                 /* dropless flow control */
2278                 if (CHIP_IS_E1H(bp)) {
2279                         int port = BP_PORT(bp);
2280                         u32 pause_enabled = 0;
2281
2282                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2283                                 pause_enabled = 1;
2284
2285                         REG_WR(bp, BAR_USTRORM_INTMEM +
2286                                USTORM_PAUSE_ENABLED_OFFSET(port),
2287                                pause_enabled);
2288                 }
2289
2290                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2291                         struct host_port_stats *pstats;
2292
2293                         pstats = bnx2x_sp(bp, port_stats);
2294                         /* reset old bmac stats */
2295                         memset(&(pstats->mac_stx[0]), 0,
2296                                sizeof(struct mac_stx));
2297                 }
2298                 if ((bp->state == BNX2X_STATE_OPEN) ||
2299                     (bp->state == BNX2X_STATE_DISABLED))
2300                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301         }
2302
2303         /* indicate link status */
2304         bnx2x_link_report(bp);
2305
2306         if (IS_E1HMF(bp)) {
2307                 int port = BP_PORT(bp);
2308                 int func;
2309                 int vn;
2310
2311                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2312                         if (vn == BP_E1HVN(bp))
2313                                 continue;
2314
2315                         func = ((vn << 1) | port);
2316
2317                         /* Set the attention towards other drivers
2318                            on the same port */
2319                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2320                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2321                 }
2322
2323                 if (bp->link_vars.link_up) {
2324                         int i;
2325
2326                         /* Init rate shaping and fairness contexts */
2327                         bnx2x_init_port_minmax(bp);
2328
2329                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2330                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2331
2332                         /* Store it to internal memory */
2333                         for (i = 0;
2334                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2335                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2336                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2337                                        ((u32 *)(&bp->cmng))[i]);
2338                 }
2339         }
2340 }
2341
2342 static void bnx2x__link_status_update(struct bnx2x *bp)
2343 {
2344         if (bp->state != BNX2X_STATE_OPEN)
2345                 return;
2346
2347         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2348
2349         if (bp->link_vars.link_up)
2350                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2351         else
2352                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2353
2354         /* indicate link status */
2355         bnx2x_link_report(bp);
2356 }
2357
2358 static void bnx2x_pmf_update(struct bnx2x *bp)
2359 {
2360         int port = BP_PORT(bp);
2361         u32 val;
2362
2363         bp->port.pmf = 1;
2364         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2365
2366         /* enable nig attention */
2367         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2368         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2369         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2370
2371         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2372 }
2373
2374 /* end of Link */
2375
2376 /* slow path */
2377
2378 /*
2379  * General service functions
2380  */
2381
2382 /* the slow path queue is odd since completions arrive on the fastpath ring */
2383 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2384                          u32 data_hi, u32 data_lo, int common)
2385 {
2386         int func = BP_FUNC(bp);
2387
2388         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2389            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2390            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2391            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2392            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2393
2394 #ifdef BNX2X_STOP_ON_ERROR
2395         if (unlikely(bp->panic))
2396                 return -EIO;
2397 #endif
2398
2399         spin_lock_bh(&bp->spq_lock);
2400
2401         if (!bp->spq_left) {
2402                 BNX2X_ERR("BUG! SPQ ring full!\n");
2403                 spin_unlock_bh(&bp->spq_lock);
2404                 bnx2x_panic();
2405                 return -EBUSY;
2406         }
2407
2408         /* The CID needs the port number encoded in it */
2409         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2410                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2411                                      HW_CID(bp, cid)));
2412         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2413         if (common)
2414                 bp->spq_prod_bd->hdr.type |=
2415                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2416
2417         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2418         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2419
2420         bp->spq_left--;
2421
2422         if (bp->spq_prod_bd == bp->spq_last_bd) {
2423                 bp->spq_prod_bd = bp->spq;
2424                 bp->spq_prod_idx = 0;
2425                 DP(NETIF_MSG_TIMER, "end of spq\n");
2426
2427         } else {
2428                 bp->spq_prod_bd++;
2429                 bp->spq_prod_idx++;
2430         }
2431
2432         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2433                bp->spq_prod_idx);
2434
2435         spin_unlock_bh(&bp->spq_lock);
2436         return 0;
2437 }
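
/*
 * A hedged sketch of a typical caller: posting a ramrod with no payload
 * on CID 0 (the command id is one of the RAMROD_CMD_ID_ETH_* values
 * defined elsewhere in the driver) looks like
 *
 *     bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
 *
 * and the completion is delivered on the fastpath CQ and dispatched via
 * bnx2x_sp_event(), as the comment above notes.
 */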
2438
2439 /* acquire split MCP access lock register */
2440 static int bnx2x_acquire_alr(struct bnx2x *bp)
2441 {
2442         u32 i, j, val;
2443         int rc = 0;
2444
2445         might_sleep();
2446         i = 100;
2447         for (j = 0; j < i*10; j++) {
2448                 val = (1UL << 31);
2449                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2450                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2451                 if (val & (1L << 31))
2452                         break;
2453
2454                 msleep(5);
2455         }
2456         if (!(val & (1L << 31))) {
2457                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2458                 rc = -EBUSY;
2459         }
2460
2461         return rc;
2462 }
2463
2464 /* release split MCP access lock register */
2465 static void bnx2x_release_alr(struct bnx2x *bp)
2466 {
2467         u32 val = 0;
2468
2469         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470 }
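
/*
 * Editorial note: the split MCP access lock is bit 31 of the register
 * at GRCBASE_MCP + 0x9c.  bnx2x_acquire_alr() polls it for up to ~5
 * seconds (1000 attempts x 5 ms) and bnx2x_release_alr() simply clears
 * the register to release it.
 */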
2471
2472 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2473 {
2474         struct host_def_status_block *def_sb = bp->def_status_blk;
2475         u16 rc = 0;
2476
2477         barrier(); /* status block is written to by the chip */
2478         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2479                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2480                 rc |= 1;
2481         }
2482         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2483                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2484                 rc |= 2;
2485         }
2486         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2487                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2488                 rc |= 4;
2489         }
2490         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2491                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2492                 rc |= 8;
2493         }
2494         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2495                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2496                 rc |= 16;
2497         }
2498         return rc;
2499 }
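
/*
 * The return value above is a bitmask of which default-SB indices have
 * moved: bit 0 - attention bits, bit 1 - cstorm, bit 2 - ustorm,
 * bit 3 - xstorm, bit 4 - tstorm; zero means nothing changed.
 */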
2500
2501 /*
2502  * slow path service functions
2503  */
2504
2505 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2506 {
2507         int port = BP_PORT(bp);
2508         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2509                        COMMAND_REG_ATTN_BITS_SET);
2510         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2511                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2512         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2513                                        NIG_REG_MASK_INTERRUPT_PORT0;
2514         u32 aeu_mask;
2515         u32 nig_mask = 0;
2516
2517         if (bp->attn_state & asserted)
2518                 BNX2X_ERR("IGU ERROR\n");
2519
2520         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2521         aeu_mask = REG_RD(bp, aeu_addr);
2522
2523         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2524            aeu_mask, asserted);
2525         aeu_mask &= ~(asserted & 0xff);
2526         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2527
2528         REG_WR(bp, aeu_addr, aeu_mask);
2529         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2530
2531         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2532         bp->attn_state |= asserted;
2533         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2534
2535         if (asserted & ATTN_HARD_WIRED_MASK) {
2536                 if (asserted & ATTN_NIG_FOR_FUNC) {
2537
2538                         bnx2x_acquire_phy_lock(bp);
2539
2540                         /* save nig interrupt mask */
2541                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2542                         REG_WR(bp, nig_int_mask_addr, 0);
2543
2544                         bnx2x_link_attn(bp);
2545
2546                         /* handle unicore attn? */
2547                 }
2548                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2549                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2550
2551                 if (asserted & GPIO_2_FUNC)
2552                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2553
2554                 if (asserted & GPIO_3_FUNC)
2555                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2556
2557                 if (asserted & GPIO_4_FUNC)
2558                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2559
2560                 if (port == 0) {
2561                         if (asserted & ATTN_GENERAL_ATTN_1) {
2562                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2563                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2564                         }
2565                         if (asserted & ATTN_GENERAL_ATTN_2) {
2566                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2567                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2568                         }
2569                         if (asserted & ATTN_GENERAL_ATTN_3) {
2570                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2571                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2572                         }
2573                 } else {
2574                         if (asserted & ATTN_GENERAL_ATTN_4) {
2575                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2576                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2577                         }
2578                         if (asserted & ATTN_GENERAL_ATTN_5) {
2579                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2580                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2581                         }
2582                         if (asserted & ATTN_GENERAL_ATTN_6) {
2583                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2584                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2585                         }
2586                 }
2587
2588         } /* if hardwired */
2589
2590         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2591            asserted, hc_addr);
2592         REG_WR(bp, hc_addr, asserted);
2593
2594         /* now set back the mask */
2595         if (asserted & ATTN_NIG_FOR_FUNC) {
2596                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2597                 bnx2x_release_phy_lock(bp);
2598         }
2599 }
2600
2601 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2602 {
2603         int port = BP_PORT(bp);
2604         int reg_offset;
2605         u32 val;
2606
2607         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2608                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2609
2610         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2611
2612                 val = REG_RD(bp, reg_offset);
2613                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2614                 REG_WR(bp, reg_offset, val);
2615
2616                 BNX2X_ERR("SPIO5 hw attention\n");
2617
2618                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2619                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2620                         /* Fan failure attention */
2621
2622                         /* The PHY reset is controlled by GPIO 1 */
2623                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2624                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2625                         /* Low power mode is controlled by GPIO 2 */
2626                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2627                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2628                         /* mark the failure */
2629                         bp->link_params.ext_phy_config &=
2630                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2631                         bp->link_params.ext_phy_config |=
2632                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2633                         SHMEM_WR(bp,
2634                                  dev_info.port_hw_config[port].
2635                                                         external_phy_config,
2636                                  bp->link_params.ext_phy_config);
2637                         /* log the failure */
2638                         printk(KERN_ERR PFX "Fan Failure on Network"
2639                                " Controller %s has caused the driver to"
2640                                " shut down the card to prevent permanent"
2641                                " damage.  Please contact Dell Support for"
2642                                " assistance\n", bp->dev->name);
2643                         break;
2644
2645                 default:
2646                         break;
2647                 }
2648         }
2649
2650         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2651                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2652                 bnx2x_acquire_phy_lock(bp);
2653                 bnx2x_handle_module_detect_int(&bp->link_params);
2654                 bnx2x_release_phy_lock(bp);
2655         }
2656
2657         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2658
2659                 val = REG_RD(bp, reg_offset);
2660                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2661                 REG_WR(bp, reg_offset, val);
2662
2663                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2664                           (attn & HW_INTERRUT_ASSERT_SET_0));
2665                 bnx2x_panic();
2666         }
2667 }
2668
2669 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2670 {
2671         u32 val;
2672
2673         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2674
2675                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2676                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2677                 /* DORQ discard attention */
2678                 if (val & 0x2)
2679                         BNX2X_ERR("FATAL error from DORQ\n");
2680         }
2681
2682         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2683
2684                 int port = BP_PORT(bp);
2685                 int reg_offset;
2686
2687                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2688                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2689
2690                 val = REG_RD(bp, reg_offset);
2691                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2692                 REG_WR(bp, reg_offset, val);
2693
2694                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2695                           (attn & HW_INTERRUT_ASSERT_SET_1));
2696                 bnx2x_panic();
2697         }
2698 }
2699
2700 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2701 {
2702         u32 val;
2703
2704         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2705
2706                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2707                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2708                 /* CFC error attention */
2709                 if (val & 0x2)
2710                         BNX2X_ERR("FATAL error from CFC\n");
2711         }
2712
2713         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2714
2715                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2716                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2717                 /* RQ_USDMDP_FIFO_OVERFLOW */
2718                 if (val & 0x18000)
2719                         BNX2X_ERR("FATAL error from PXP\n");
2720         }
2721
2722         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2723
2724                 int port = BP_PORT(bp);
2725                 int reg_offset;
2726
2727                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2728                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2729
2730                 val = REG_RD(bp, reg_offset);
2731                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2732                 REG_WR(bp, reg_offset, val);
2733
2734                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2735                           (attn & HW_INTERRUT_ASSERT_SET_2));
2736                 bnx2x_panic();
2737         }
2738 }
2739
2740 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2741 {
2742         u32 val;
2743
2744         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2745
2746                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2747                         int func = BP_FUNC(bp);
2748
2749                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2750                         bnx2x__link_status_update(bp);
2751                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2752                                                         DRV_STATUS_PMF)
2753                                 bnx2x_pmf_update(bp);
2754
2755                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2756
2757                         BNX2X_ERR("MC assert!\n");
2758                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2759                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2760                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2761                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2762                         bnx2x_panic();
2763
2764                 } else if (attn & BNX2X_MCP_ASSERT) {
2765
2766                         BNX2X_ERR("MCP assert!\n");
2767                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2768                         bnx2x_fw_dump(bp);
2769
2770                 } else
2771                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2772         }
2773
2774         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2775                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2776                 if (attn & BNX2X_GRC_TIMEOUT) {
2777                         val = CHIP_IS_E1H(bp) ?
2778                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2779                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2780                 }
2781                 if (attn & BNX2X_GRC_RSV) {
2782                         val = CHIP_IS_E1H(bp) ?
2783                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2784                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2785                 }
2786                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2787         }
2788 }
2789
2790 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2791 {
2792         struct attn_route attn;
2793         struct attn_route group_mask;
2794         int port = BP_PORT(bp);
2795         int index;
2796         u32 reg_addr;
2797         u32 val;
2798         u32 aeu_mask;
2799
2800         /* need to take the HW lock because the MCP or the other port
2801            might also try to handle this event */
2802         bnx2x_acquire_alr(bp);
2803
2804         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2805         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2806         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2807         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2808         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2809            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2810
2811         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2812                 if (deasserted & (1 << index)) {
2813                         group_mask = bp->attn_group[index];
2814
2815                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2816                            index, group_mask.sig[0], group_mask.sig[1],
2817                            group_mask.sig[2], group_mask.sig[3]);
2818
2819                         bnx2x_attn_int_deasserted3(bp,
2820                                         attn.sig[3] & group_mask.sig[3]);
2821                         bnx2x_attn_int_deasserted1(bp,
2822                                         attn.sig[1] & group_mask.sig[1]);
2823                         bnx2x_attn_int_deasserted2(bp,
2824                                         attn.sig[2] & group_mask.sig[2]);
2825                         bnx2x_attn_int_deasserted0(bp,
2826                                         attn.sig[0] & group_mask.sig[0]);
2827
2828                         if ((attn.sig[0] & group_mask.sig[0] &
2829                                                 HW_PRTY_ASSERT_SET_0) ||
2830                             (attn.sig[1] & group_mask.sig[1] &
2831                                                 HW_PRTY_ASSERT_SET_1) ||
2832                             (attn.sig[2] & group_mask.sig[2] &
2833                                                 HW_PRTY_ASSERT_SET_2))
2834                                 BNX2X_ERR("FATAL HW block parity attention\n");
2835                 }
2836         }
2837
2838         bnx2x_release_alr(bp);
2839
2840         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2841
2842         val = ~deasserted;
2843         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2844            val, reg_addr);
2845         REG_WR(bp, reg_addr, val);
2846
2847         if (~bp->attn_state & deasserted)
2848                 BNX2X_ERR("IGU ERROR\n");
2849
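        /*
         * Editor's note: with the event handled, the deasserted bits are
         * unmasked again in the per-port AEU mask below (under the HW lock,
         * since the other function shares these registers) and then cleared
         * from the driver's attn_state tracking.
         */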
2850         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2851                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2852
2853         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2854         aeu_mask = REG_RD(bp, reg_addr);
2855
2856         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2857            aeu_mask, deasserted);
2858         aeu_mask |= (deasserted & 0xff);
2859         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2860
2861         REG_WR(bp, reg_addr, aeu_mask);
2862         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2863
2864         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2865         bp->attn_state &= ~deasserted;
2866         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2867 }
2868
2869 static void bnx2x_attn_int(struct bnx2x *bp)
2870 {
2871         /* read local copy of bits */
2872         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2873                                                                 attn_bits);
2874         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2875                                                                 attn_bits_ack);
2876         u32 attn_state = bp->attn_state;
2877
2878         /* look for changed bits */
2879         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2880         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
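        /*
         * Editor's note: a bit is newly asserted when the HW reports it
         * (attn_bits) while it is neither acked (attn_ack) nor already
         * tracked (attn_state), and newly deasserted when the HW has
         * dropped it while it is still acked and tracked.  The check
         * below flags bits where attn_bits and attn_ack agree but
         * disagree with attn_state, which should never happen.
         */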
2881
2882         DP(NETIF_MSG_HW,
2883            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2884            attn_bits, attn_ack, asserted, deasserted);
2885
2886         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2887                 BNX2X_ERR("BAD attention state\n");
2888
2889         /* handle bits that were raised */
2890         if (asserted)
2891                 bnx2x_attn_int_asserted(bp, asserted);
2892
2893         if (deasserted)
2894                 bnx2x_attn_int_deasserted(bp, deasserted);
2895 }
2896
2897 static void bnx2x_sp_task(struct work_struct *work)
2898 {
2899         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2900         u16 status;
2901
2903         /* Return here if interrupt is disabled */
2904         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2905                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2906                 return;
2907         }
2908
2909         status = bnx2x_update_dsb_idx(bp);
2910 /*      if (status == 0)                                     */
2911 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2912
2913         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2914
2915         /* HW attentions */
2916         if (status & 0x1)
2917                 bnx2x_attn_int(bp);
2918
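        /*
         * Editor's note: each storm index of the default status block is
         * acked with IGU_INT_NOP; only the final ack (TSTORM) uses
         * IGU_INT_ENABLE, so the slowpath interrupt is re-enabled only
         * once all indices have been acknowledged.
         */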
2919         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2920                      IGU_INT_NOP, 1);
2921         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2922                      IGU_INT_NOP, 1);
2923         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2924                      IGU_INT_NOP, 1);
2925         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2926                      IGU_INT_NOP, 1);
2927         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2928                      IGU_INT_ENABLE, 1);
2929
2930 }
2931
2932 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2933 {
2934         struct net_device *dev = dev_instance;
2935         struct bnx2x *bp = netdev_priv(dev);
2936
2937         /* Return here if interrupt is disabled */
2938         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2939                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2940                 return IRQ_HANDLED;
2941         }
2942
2943         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2944
2945 #ifdef BNX2X_STOP_ON_ERROR
2946         if (unlikely(bp->panic))
2947                 return IRQ_HANDLED;
2948 #endif
2949
2950         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
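        /* Editor's note: the hard-IRQ side only disables further slowpath
         * interrupts (the IGU_INT_DISABLE ack above) and defers the actual
         * handling to bnx2x_sp_task() in process context. */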
2951
2952         return IRQ_HANDLED;
2953 }
2954
2955 /* end of slow path */
2956
2957 /* Statistics */
2958
2959 /****************************************************************************
2960 * Macros
2961 ****************************************************************************/
2962
2963 /* sum[hi:lo] += add[hi:lo] */
2964 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2965         do { \
2966                 s_lo += a_lo; \
2967                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2968         } while (0)
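/*
 * Editor's note: ADD_64 emulates a 64-bit add on split hi/lo u32 pairs.
 * After "s_lo += a_lo", unsigned wraparound means s_lo < a_lo exactly when
 * the low-word addition overflowed, so a carry of 1 goes into s_hi, e.g.
 * s = {0x0, 0xffffffff} plus a = {0x0, 0x1} yields s = {0x1, 0x0}.
 */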
2969
2970 /* difference = minuend - subtrahend */
2971 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2972         do { \
2973                 if (m_lo < s_lo) { \
2974                         /* underflow */ \
2975                         d_hi = m_hi - s_hi; \
2976                         if (d_hi > 0) { \
2977                                 /* we can 'loan' 1 */ \
2978                                 d_hi--; \
2979                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2980                         } else { \
2981                                 /* m_hi <= s_hi */ \
2982                                 d_hi = 0; \
2983                                 d_lo = 0; \
2984                         } \
2985                 } else { \
2986                         /* m_lo >= s_lo */ \
2987                         if (m_hi < s_hi) { \
2988                                 d_hi = 0; \
2989                                 d_lo = 0; \
2990                         } else { \
2991                                 /* m_hi >= s_hi */ \
2992                                 d_hi = m_hi - s_hi; \
2993                                 d_lo = m_lo - s_lo; \
2994                         } \
2995                 } \
2996         } while (0)
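/*
 * Editor's note: DIFF_64 borrows 1 from the high word when m_lo < s_lo,
 * e.g. {0x1, 0x0} minus {0x0, 0x1} yields {0x0, 0xffffffff}.  If the true
 * difference would be negative (m < s), the result is clamped to {0, 0}
 * instead of wrapping, presumably because a counter moving backwards
 * means the counting block was reset.
 */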
2997
2998 #define UPDATE_STAT64(s, t) \
2999         do { \
3000                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3001                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3002                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3003                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3004                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3005                        pstats->mac_stx[1].t##_lo, diff.lo); \
3006         } while (0)
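/*
 * Editor's note: mac_stx[0] holds the previous raw MAC counter snapshot
 * and mac_stx[1] the running 64-bit total: each UPDATE_STAT64 computes
 * the delta against [0], refreshes [0] with the new sample and folds the
 * delta into [1].
 */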
3007
3008 #define UPDATE_STAT64_NIG(s, t) \
3009         do { \
3010                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3011                         diff.lo, new->s##_lo, old->s##_lo); \
3012                 ADD_64(estats->t##_hi, diff.hi, \
3013                        estats->t##_lo, diff.lo); \
3014         } while (0)
3015
3016 /* sum[hi:lo] += add */
3017 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3018         do { \
3019                 s_lo += a; \
3020                 s_hi += (s_lo < a) ? 1 : 0; \
3021         } while (0)
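/*
 * Editor's note: same carry trick as ADD_64, but the addend is a plain
 * u32, extending a 32-bit delta into a 64-bit accumulator.
 */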
3022
3023 #define UPDATE_EXTEND_STAT(s) \
3024         do { \
3025                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3026                               pstats->mac_stx[1].s##_lo, \
3027                               new->s); \
3028         } while (0)
3029
3030 #define UPDATE_EXTEND_TSTAT(s, t) \
3031         do { \
3032                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3033                 old_tclient->s = tclient->s; \
3034                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035         } while (0)
3036
3037 #define UPDATE_EXTEND_USTAT(s, t) \
3038         do { \
3039                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3040                 old_uclient->s = uclient->s; \
3041                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3042         } while (0)
3043
3044 #define UPDATE_EXTEND_XSTAT(s, t) \
3045         do { \
3046                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3047                 old_xclient->s = xclient->s; \
3048                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3049         } while (0)
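/*
 * Editor's note: the three UPDATE_EXTEND_*STAT variants above rely on
 * unsigned wraparound: le32_to_cpu(new) - le32_to_cpu(old) yields the
 * correct delta modulo 2^32 even if the firmware counter wrapped once
 * between samples.  The delta is folded into the 64-bit qstats field
 * and the new sample is cached as "old" for the next round.
 */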
3050
3051 /* minuend -= subtrahend */
3052 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3053         do { \
3054                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3055         } while (0)
3056
3057 /* minuend[hi:lo] -= subtrahend */
3058 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3059         do { \
3060                 SUB_64(m_hi, 0, m_lo, s); \
3061         } while (0)
3062
3063 #define SUB_EXTEND_USTAT(s, t) \
3064         do { \
3065                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3066                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3067         } while (0)
3068
3069 /*
3070  * General service functions
3071  */
3072
3073 static inline long bnx2x_hilo(u32 *hiref)
3074 {
3075         u32 lo = *(hiref + 1);
3076 #if (BITS_PER_LONG == 64)
3077         u32 hi = *hiref;
3078
3079         return HILO_U64(hi, lo);
3080 #else
3081         return lo;
3082 #endif
3083 }
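/*
 * Editor's note: hiref points at the _hi half of a stats field, so
 * hiref + 1 is the adjacent _lo half.  On 32-bit kernels only the low
 * dword is returned, so the (unsigned long) net_device_stats fields
 * silently truncate once a counter exceeds 2^32; 64-bit kernels keep
 * the full value.
 */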
3084
3085 /*
3086  * Init service functions
3087  */
3088
3089 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3090 {
3091         if (!bp->stats_pending) {
3092                 struct eth_query_ramrod_data ramrod_data = {0};
3093                 int i, rc;
3094
3095                 ramrod_data.drv_counter = bp->stats_counter++;
3096                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3097                 for_each_queue(bp, i)
3098                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3099
3100                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3101                                    ((u32 *)&ramrod_data)[1],
3102                                    ((u32 *)&ramrod_data)[0], 0);
3103                 if (rc == 0) {
3104                         /* the stats ramrod has its own slot on the spq */
3105                         bp->spq_left++;
3106                         bp->stats_pending = 1;
3107                 }
3108         }
3109 }
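/*
 * Editor's note (assumption): bnx2x_sp_post() presumably consumes one
 * slowpath-queue credit; since the stats ramrod has a dedicated slot,
 * the credit is handed back immediately via bp->spq_left++ above.
 */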
3110
3111 static void bnx2x_stats_init(struct bnx2x *bp)
3112 {
3113         int port = BP_PORT(bp);
3114         int i;
3115
3116         bp->stats_pending = 0;
3117         bp->executer_idx = 0;
3118         bp->stats_counter = 0;
3119
3120         /* port stats */
3121         if (!BP_NOMCP(bp))
3122                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3123         else
3124                 bp->port.port_stx = 0;
3125         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3126
3127         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3128         bp->port.old_nig_stats.brb_discard =
3129                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3130         bp->port.old_nig_stats.brb_truncate =
3131                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3132         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3133                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3134         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3135                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3136
3137         /* function stats */
3138         for_each_queue(bp, i) {
3139                 struct bnx2x_fastpath *fp = &bp->fp[i];
3140
3141                 memset(&fp->old_tclient, 0,
3142                        sizeof(struct tstorm_per_client_stats));
3143                 memset(&fp->old_uclient, 0,
3144                        sizeof(struct ustorm_per_client_stats));
3145                 memset(&fp->old_xclient, 0,
3146                        sizeof(struct xstorm_per_client_stats));
3147                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3148         }
3149
3150         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3151         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3152
3153         bp->stats_state = STATS_STATE_DISABLED;
3154         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3155                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3156 }
3157
3158 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3159 {
3160         struct dmae_command *dmae = &bp->stats_dmae;
3161         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3162
3163         *stats_comp = DMAE_COMP_VAL;
3164         if (CHIP_REV_IS_SLOW(bp))
3165                 return;
3166
3167         /* loader */
3168         if (bp->executer_idx) {
3169                 int loader_idx = PMF_DMAE_C(bp);
3170
3171                 memset(dmae, 0, sizeof(struct dmae_command));
3172
3173                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3174                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3175                                 DMAE_CMD_DST_RESET |
3176 #ifdef __BIG_ENDIAN
3177                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3178 #else
3179                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3180 #endif
3181                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3182                                                DMAE_CMD_PORT_0) |
3183                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3184                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3185                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3186                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3187                                      sizeof(struct dmae_command) *
3188                                      (loader_idx + 1)) >> 2;
3189                 dmae->dst_addr_hi = 0;
3190                 dmae->len = sizeof(struct dmae_command) >> 2;
3191                 if (CHIP_IS_E1(bp))
3192                         dmae->len--;
3193                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3194                 dmae->comp_addr_hi = 0;
3195                 dmae->comp_val = 1;
3196
3197                 *stats_comp = 0;
3198                 bnx2x_post_dmae(bp, dmae, loader_idx);
3199
3200         } else if (bp->func_stx) {
3201                 *stats_comp = 0;
3202                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3203         }
3204 }
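/*
 * Editor's note: when a chain of commands has been prepared in the
 * slowpath buffer (executer_idx != 0), a small "loader" command is
 * posted instead of the chain itself; completions routed through the
 * dmae_reg_go_c registers then launch the prepared commands from DMAE
 * command memory, and the last command in the chain reports back to
 * the driver by writing DMAE_COMP_VAL to stats_comp.
 */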
3205
3206 static int bnx2x_stats_comp(struct bnx2x *bp)
3207 {
3208         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3209         int cnt = 10;
3210
3211         might_sleep();
3212         while (*stats_comp != DMAE_COMP_VAL) {
3213                 if (!cnt) {
3214                         BNX2X_ERR("timeout waiting for stats to finish\n");
3215                         break;
3216                 }
3217                 cnt--;
3218                 msleep(1);
3219         }
3220         return 1;
3221 }
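/*
 * Editor's note: the wait is bounded at roughly 10 x 1 ms.  On timeout
 * an error is logged but the function still returns 1, so callers treat
 * the completion as done either way.
 */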
3222
3223 /*
3224  * Statistics service functions
3225  */
3226
3227 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3228 {
3229         struct dmae_command *dmae;
3230         u32 opcode;
3231         int loader_idx = PMF_DMAE_C(bp);
3232         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3233
3234         /* sanity */
3235         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3236                 BNX2X_ERR("BUG!\n");
3237                 return;
3238         }
3239
3240         bp->executer_idx = 0;
3241
3242         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3243                   DMAE_CMD_C_ENABLE |
3244                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3245 #ifdef __BIG_ENDIAN
3246                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3247 #else
3248                   DMAE_CMD_ENDIANITY_DW_SWAP |
3249 #endif
3250                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3251                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3252
3253         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3254         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3255         dmae->src_addr_lo = bp->port.port_stx >> 2;
3256         dmae->src_addr_hi = 0;
3257         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3258         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3259         dmae->len = DMAE_LEN32_RD_MAX;
3260         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3261         dmae->comp_addr_hi = 0;
3262         dmae->comp_val = 1;
3263
3264         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3266         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3267         dmae->src_addr_hi = 0;
3268         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3269                                    DMAE_LEN32_RD_MAX * 4);
3270         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3271                                    DMAE_LEN32_RD_MAX * 4);
3272         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3273         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3274         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3275         dmae->comp_val = DMAE_COMP_VAL;
3276
3277         *stats_comp = 0;
3278         bnx2x_hw_stats_post(bp);
3279         bnx2x_stats_comp(bp);
3280 }
3281
3282 static void bnx2x_port_stats_init(struct bnx2x *bp)
3283 {
3284         struct dmae_command *dmae;
3285         int port = BP_PORT(bp);
3286         int vn = BP_E1HVN(bp);
3287         u32 opcode;
3288         int loader_idx = PMF_DMAE_C(bp);
3289         u32 mac_addr;
3290         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3291
3292         /* sanity */
3293         if (!bp->link_vars.link_up || !bp->port.pmf) {
3294                 BNX2X_ERR("BUG!\n");
3295                 return;
3296         }
3297
3298         bp->executer_idx = 0;
3299
3300         /* MCP */
3301         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3302                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304 #ifdef __BIG_ENDIAN
3305                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306 #else
3307                   DMAE_CMD_ENDIANITY_DW_SWAP |
3308 #endif
3309                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310                   (vn << DMAE_CMD_E1HVN_SHIFT));
3311
3312         if (bp->port.port_stx) {
3313
3314                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3315                 dmae->opcode = opcode;
3316                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3317                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3318                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3319                 dmae->dst_addr_hi = 0;
3320                 dmae->len = sizeof(struct host_port_stats) >> 2;
3321                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322                 dmae->comp_addr_hi = 0;
3323                 dmae->comp_val = 1;
3324         }
3325
3326         if (bp->func_stx) {
3327
3328                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329                 dmae->opcode = opcode;
3330                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3331                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3332                 dmae->dst_addr_lo = bp->func_stx >> 2;
3333                 dmae->dst_addr_hi = 0;
3334                 dmae->len = sizeof(struct host_func_stats) >> 2;
3335                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336                 dmae->comp_addr_hi = 0;
3337                 dmae->comp_val = 1;
3338         }
3339
3340         /* MAC */
3341         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3342                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3343                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3344 #ifdef __BIG_ENDIAN
3345                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3346 #else
3347                   DMAE_CMD_ENDIANITY_DW_SWAP |
3348 #endif
3349                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3350                   (vn << DMAE_CMD_E1HVN_SHIFT));
3351
3352         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3353
3354                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3355                                    NIG_REG_INGRESS_BMAC0_MEM);
3356
3357                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3358                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3359                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360                 dmae->opcode = opcode;
3361                 dmae->src_addr_lo = (mac_addr +
3362                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3363                 dmae->src_addr_hi = 0;
3364                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3365                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3366                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3367                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3368                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3369                 dmae->comp_addr_hi = 0;
3370                 dmae->comp_val = 1;
3371
3372                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3373                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3374                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3375                 dmae->opcode = opcode;
3376                 dmae->src_addr_lo = (mac_addr +
3377                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378                 dmae->src_addr_hi = 0;
3379                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3380                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3381                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3382                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3383                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3384                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3385                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3386                 dmae->comp_addr_hi = 0;
3387                 dmae->comp_val = 1;
3388
3389         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3390
3391                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3392
3393                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3394                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3395                 dmae->opcode = opcode;
3396                 dmae->src_addr_lo = (mac_addr +
3397                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3398                 dmae->src_addr_hi = 0;
3399                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3400                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3401                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3402                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3403                 dmae->comp_addr_hi = 0;
3404                 dmae->comp_val = 1;
3405
3406                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3407                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3408                 dmae->opcode = opcode;
3409                 dmae->src_addr_lo = (mac_addr +
3410                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3411                 dmae->src_addr_hi = 0;
3412                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3413                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3414                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3415                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3416                 dmae->len = 1;
3417                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418                 dmae->comp_addr_hi = 0;
3419                 dmae->comp_val = 1;
3420
3421                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3422                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3423                 dmae->opcode = opcode;
3424                 dmae->src_addr_lo = (mac_addr +
3425                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3426                 dmae->src_addr_hi = 0;
3427                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3428                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3429                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3430                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3431                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3432                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3433                 dmae->comp_addr_hi = 0;
3434                 dmae->comp_val = 1;
3435         }
3436
3437         /* NIG */
3438         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439         dmae->opcode = opcode;
3440         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3441                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3442         dmae->src_addr_hi = 0;
3443         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3444         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3445         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3446         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3447         dmae->comp_addr_hi = 0;
3448         dmae->comp_val = 1;
3449
3450         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3451         dmae->opcode = opcode;
3452         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3453                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3454         dmae->src_addr_hi = 0;
3455         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3456                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3457         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3458                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3459         dmae->len = (2*sizeof(u32)) >> 2;
3460         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3461         dmae->comp_addr_hi = 0;
3462         dmae->comp_val = 1;
3463
3464         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3465         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3466                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3467                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3468 #ifdef __BIG_ENDIAN
3469                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3470 #else
3471                         DMAE_CMD_ENDIANITY_DW_SWAP |
3472 #endif
3473                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3474                         (vn << DMAE_CMD_E1HVN_SHIFT));
3475         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3476                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3477         dmae->src_addr_hi = 0;
3478         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3479                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3480         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3481                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3482         dmae->len = (2*sizeof(u32)) >> 2;
3483         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3484         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3485         dmae->comp_val = DMAE_COMP_VAL;
3486
3487         *stats_comp = 0;
3488 }
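/*
 * Editor's note: every command prepared above completes to GRC through
 * dmae_reg_go_c[loader_idx], which is what lets bnx2x_hw_stats_post()
 * chain them; only the final NIG command completes to PCI, writing
 * DMAE_COMP_VAL into stats_comp so that bnx2x_stats_comp() can poll for
 * the end of the whole transfer.
 */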
3489
3490 static void bnx2x_func_stats_init(struct bnx2x *bp)
3491 {
3492         struct dmae_command *dmae = &bp->stats_dmae;
3493         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3494
3495         /* sanity */
3496         if (!bp->func_stx) {
3497                 BNX2X_ERR("BUG!\n");
3498                 return;
3499         }
3500
3501         bp->executer_idx = 0;
3502         memset(dmae, 0, sizeof(struct dmae_command));
3503
3504         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3505                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3506                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3507 #ifdef __BIG_ENDIAN
3508                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3509 #else
3510                         DMAE_CMD_ENDIANITY_DW_SWAP |
3511 #endif
3512                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3513                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3514         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3515         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3516         dmae->dst_addr_lo = bp->func_stx >> 2;
3517         dmae->dst_addr_hi = 0;
3518         dmae->len = sizeof(struct host_func_stats) >> 2;
3519         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3520         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3521         dmae->comp_val = DMAE_COMP_VAL;
3522
3523         *stats_comp = 0;
3524 }
3525
3526 static void bnx2x_stats_start(struct bnx2x *bp)
3527 {
3528         if (bp->port.pmf)
3529                 bnx2x_port_stats_init(bp);
3530
3531         else if (bp->func_stx)
3532                 bnx2x_func_stats_init(bp);
3533
3534         bnx2x_hw_stats_post(bp);
3535         bnx2x_storm_stats_post(bp);
3536 }
3537
3538 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3539 {
3540         bnx2x_stats_comp(bp);
3541         bnx2x_stats_pmf_update(bp);
3542         bnx2x_stats_start(bp);
3543 }
3544
3545 static void bnx2x_stats_restart(struct bnx2x *bp)
3546 {
3547         bnx2x_stats_comp(bp);
3548         bnx2x_stats_start(bp);
3549 }
3550
3551 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3552 {
3553         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3554         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3555         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3556         struct {
3557                 u32 lo;
3558                 u32 hi;
3559         } diff;
3560
3561         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3562         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3563         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3564         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3565         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3566         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3567         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3568         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3569         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3570         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3571         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3572         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3573         UPDATE_STAT64(tx_stat_gt127,
3574                                 tx_stat_etherstatspkts65octetsto127octets);
3575         UPDATE_STAT64(tx_stat_gt255,
3576                                 tx_stat_etherstatspkts128octetsto255octets);
3577         UPDATE_STAT64(tx_stat_gt511,
3578                                 tx_stat_etherstatspkts256octetsto511octets);
3579         UPDATE_STAT64(tx_stat_gt1023,
3580                                 tx_stat_etherstatspkts512octetsto1023octets);
3581         UPDATE_STAT64(tx_stat_gt1518,
3582                                 tx_stat_etherstatspkts1024octetsto1522octets);
3583         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3584         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3585         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3586         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3587         UPDATE_STAT64(tx_stat_gterr,
3588                                 tx_stat_dot3statsinternalmactransmiterrors);
3589         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3590
3591         estats->pause_frames_received_hi =
3592                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3593         estats->pause_frames_received_lo =
3594                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3595
3596         estats->pause_frames_sent_hi =
3597                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3598         estats->pause_frames_sent_lo =
3599                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3600 }
3601
3602 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3603 {
3604         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3605         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3606         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3607
3608         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3609         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3610         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3611         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3612         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3613         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3614         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3615         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3616         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3617         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3618         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3619         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3620         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3621         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3622         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3623         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3624         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3626         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3627         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3628         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3629         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3630         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3631         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3632         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3633         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3634         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3635         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3636         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3637         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3638         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3639
3640         estats->pause_frames_received_hi =
3641                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3642         estats->pause_frames_received_lo =
3643                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3644         ADD_64(estats->pause_frames_received_hi,
3645                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3646                estats->pause_frames_received_lo,
3647                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3648
3649         estats->pause_frames_sent_hi =
3650                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3651         estats->pause_frames_sent_lo =
3652                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3653         ADD_64(estats->pause_frames_sent_hi,
3654                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3655                estats->pause_frames_sent_lo,
3656                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3657 }
3658
3659 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3660 {
3661         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3662         struct nig_stats *old = &(bp->port.old_nig_stats);
3663         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3664         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3665         struct {
3666                 u32 lo;
3667                 u32 hi;
3668         } diff;
3669         u32 nig_timer_max;
3670
3671         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3672                 bnx2x_bmac_stats_update(bp);
3673
3674         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3675                 bnx2x_emac_stats_update(bp);
3676
3677         else { /* unreached */
3678                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3679                 return -1;
3680         }
3681
3682         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3683                       new->brb_discard - old->brb_discard);
3684         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3685                       new->brb_truncate - old->brb_truncate);
3686
3687         UPDATE_STAT64_NIG(egress_mac_pkt0,
3688                                         etherstatspkts1024octetsto1522octets);
3689         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3690
3691         memcpy(old, new, sizeof(struct nig_stats));
3692
3693         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3694                sizeof(struct mac_stx));
3695         estats->brb_drop_hi = pstats->brb_drop_hi;
3696         estats->brb_drop_lo = pstats->brb_drop_lo;
3697
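        /*
         * Editor's note (assumption): bumping _end before setting _start
         * equal to it looks like a seqcount-style marker that lets the
         * consumer of this block detect a torn snapshot when the two
         * fields differ.
         */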
3698         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3699
3700         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3701         if (nig_timer_max != estats->nig_timer_max) {
3702                 estats->nig_timer_max = nig_timer_max;
3703                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3704         }
3705
3706         return 0;
3707 }
3708
3709 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3710 {
3711         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3712         struct tstorm_per_port_stats *tport =
3713                                         &stats->tstorm_common.port_statistics;
3714         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3715         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3716         int i;
3717
3718         memset(&(fstats->total_bytes_received_hi), 0,
3719                sizeof(struct host_func_stats) - 2*sizeof(u32));
3720         estats->error_bytes_received_hi = 0;
3721         estats->error_bytes_received_lo = 0;
3722         estats->etherstatsoverrsizepkts_hi = 0;
3723         estats->etherstatsoverrsizepkts_lo = 0;
3724         estats->no_buff_discard_hi = 0;
3725         estats->no_buff_discard_lo = 0;
3726
3727         for_each_queue(bp, i) {
3728                 struct bnx2x_fastpath *fp = &bp->fp[i];
3729                 int cl_id = fp->cl_id;
3730                 struct tstorm_per_client_stats *tclient =
3731                                 &stats->tstorm_common.client_statistics[cl_id];
3732                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3733                 struct ustorm_per_client_stats *uclient =
3734                                 &stats->ustorm_common.client_statistics[cl_id];
3735                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3736                 struct xstorm_per_client_stats *xclient =
3737                                 &stats->xstorm_common.client_statistics[cl_id];
3738                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3739                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3740                 u32 diff;
3741
3742                 /* are storm stats valid? */
3743                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3744                                                         bp->stats_counter) {
3745                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3746                            "  xstorm counter (%d) != stats_counter (%d)\n",
3747                            i, le16_to_cpu(xclient->stats_counter), bp->stats_counter);
3748                         return -1;
3749                 }
3750                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3751                                                         bp->stats_counter) {
3752                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3753                            "  tstorm counter (%d) != stats_counter (%d)\n",
3754                            i, le16_to_cpu(tclient->stats_counter), bp->stats_counter);
3755                         return -2;
3756                 }
3757                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3758                                                         bp->stats_counter) {
3759                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3760                            "  ustorm counter (%d) != stats_counter (%d)\n",
3761                            i, le16_to_cpu(uclient->stats_counter), bp->stats_counter);
3762                         return -4;
3763                 }
3764
3765                 qstats->total_bytes_received_hi =
3766                 qstats->valid_bytes_received_hi =
3767                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3768                 qstats->total_bytes_received_lo =
3769                 qstats->valid_bytes_received_lo =
3770                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3771
3772                 qstats->error_bytes_received_hi =
3773                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3774                 qstats->error_bytes_received_lo =
3775                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3776
3777                 ADD_64(qstats->total_bytes_received_hi,
3778                        qstats->error_bytes_received_hi,
3779                        qstats->total_bytes_received_lo,
3780                        qstats->error_bytes_received_lo);
3781
3782                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3783                                         total_unicast_packets_received);
3784                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3785                                         total_multicast_packets_received);
3786                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3787                                         total_broadcast_packets_received);
3788                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3789                                         etherstatsoverrsizepkts);
3790                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3791
3792                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3793                                         total_unicast_packets_received);
3794                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3795                                         total_multicast_packets_received);
3796                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3797                                         total_broadcast_packets_received);
3798                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3799                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3800                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3801
3802                 qstats->total_bytes_transmitted_hi =
3803                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3804                 qstats->total_bytes_transmitted_lo =
3805                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3806
3807                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3808                                         total_unicast_packets_transmitted);
3809                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3810                                         total_multicast_packets_transmitted);
3811                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3812                                         total_broadcast_packets_transmitted);
3813
3814                 old_tclient->checksum_discard = tclient->checksum_discard;
3815                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3816
3817                 ADD_64(fstats->total_bytes_received_hi,
3818                        qstats->total_bytes_received_hi,
3819                        fstats->total_bytes_received_lo,
3820                        qstats->total_bytes_received_lo);
3821                 ADD_64(fstats->total_bytes_transmitted_hi,
3822                        qstats->total_bytes_transmitted_hi,
3823                        fstats->total_bytes_transmitted_lo,
3824                        qstats->total_bytes_transmitted_lo);
3825                 ADD_64(fstats->total_unicast_packets_received_hi,
3826                        qstats->total_unicast_packets_received_hi,
3827                        fstats->total_unicast_packets_received_lo,
3828                        qstats->total_unicast_packets_received_lo);
3829                 ADD_64(fstats->total_multicast_packets_received_hi,
3830                        qstats->total_multicast_packets_received_hi,
3831                        fstats->total_multicast_packets_received_lo,
3832                        qstats->total_multicast_packets_received_lo);
3833                 ADD_64(fstats->total_broadcast_packets_received_hi,
3834                        qstats->total_broadcast_packets_received_hi,
3835                        fstats->total_broadcast_packets_received_lo,
3836                        qstats->total_broadcast_packets_received_lo);
3837                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3838                        qstats->total_unicast_packets_transmitted_hi,
3839                        fstats->total_unicast_packets_transmitted_lo,
3840                        qstats->total_unicast_packets_transmitted_lo);
3841                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3842                        qstats->total_multicast_packets_transmitted_hi,
3843                        fstats->total_multicast_packets_transmitted_lo,
3844                        qstats->total_multicast_packets_transmitted_lo);
3845                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3846                        qstats->total_broadcast_packets_transmitted_hi,
3847                        fstats->total_broadcast_packets_transmitted_lo,
3848                        qstats->total_broadcast_packets_transmitted_lo);
3849                 ADD_64(fstats->valid_bytes_received_hi,
3850                        qstats->valid_bytes_received_hi,
3851                        fstats->valid_bytes_received_lo,
3852                        qstats->valid_bytes_received_lo);
3853
3854                 ADD_64(estats->error_bytes_received_hi,
3855                        qstats->error_bytes_received_hi,
3856                        estats->error_bytes_received_lo,
3857                        qstats->error_bytes_received_lo);
3858                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3859                        qstats->etherstatsoverrsizepkts_hi,
3860                        estats->etherstatsoverrsizepkts_lo,
3861                        qstats->etherstatsoverrsizepkts_lo);
3862                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3863                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3864         }
3865
3866         ADD_64(fstats->total_bytes_received_hi,
3867                estats->rx_stat_ifhcinbadoctets_hi,
3868                fstats->total_bytes_received_lo,
3869                estats->rx_stat_ifhcinbadoctets_lo);
3870
3871         memcpy(estats, &(fstats->total_bytes_received_hi),
3872                sizeof(struct host_func_stats) - 2*sizeof(u32));
3873
3874         ADD_64(estats->etherstatsoverrsizepkts_hi,
3875                estats->rx_stat_dot3statsframestoolong_hi,
3876                estats->etherstatsoverrsizepkts_lo,
3877                estats->rx_stat_dot3statsframestoolong_lo);
3878         ADD_64(estats->error_bytes_received_hi,
3879                estats->rx_stat_ifhcinbadoctets_hi,
3880                estats->error_bytes_received_lo,
3881                estats->rx_stat_ifhcinbadoctets_lo);
3882
3883         if (bp->port.pmf) {
3884                 estats->mac_filter_discard =
3885                                 le32_to_cpu(tport->mac_filter_discard);
3886                 estats->xxoverflow_discard =
3887                                 le32_to_cpu(tport->xxoverflow_discard);
3888                 estats->brb_truncate_discard =
3889                                 le32_to_cpu(tport->brb_truncate_discard);
3890                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3891         }
3892
3893         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3894
3895         bp->stats_pending = 0;
3896
3897         return 0;
3898 }
3899
3900 static void bnx2x_net_stats_update(struct bnx2x *bp)
3901 {
3902         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3903         struct net_device_stats *nstats = &bp->dev->stats;
3904         int i;
3905
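        /*
         * Editor's note: this maps the firmware and MAC counters onto
         * the generic net_device_stats fields; tx_fifo_errors,
         * tx_heartbeat_errors and tx_window_errors are left at zero
         * below, presumably because the hardware exposes no matching
         * counters.
         */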
3906         nstats->rx_packets =
3907                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3908                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3909                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3910
3911         nstats->tx_packets =
3912                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3913                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3914                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3915
3916         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3917
3918         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3919
3920         nstats->rx_dropped = estats->mac_discard;
3921         for_each_queue(bp, i)
3922                 nstats->rx_dropped +=
3923                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3924
3925         nstats->tx_dropped = 0;
3926
3927         nstats->multicast =
3928                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3929
3930         nstats->collisions =
3931                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3932
3933         nstats->rx_length_errors =
3934                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3935                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3936         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3937                                  bnx2x_hilo(&estats->brb_truncate_hi);
3938         nstats->rx_crc_errors =
3939                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3940         nstats->rx_frame_errors =
3941                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3942         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3943         nstats->rx_missed_errors = estats->xxoverflow_discard;
3944
3945         nstats->rx_errors = nstats->rx_length_errors +
3946                             nstats->rx_over_errors +
3947                             nstats->rx_crc_errors +
3948                             nstats->rx_frame_errors +
3949                             nstats->rx_fifo_errors +
3950                             nstats->rx_missed_errors;
3951
3952         nstats->tx_aborted_errors =
3953                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3954                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3955         nstats->tx_carrier_errors =
3956                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3957         nstats->tx_fifo_errors = 0;
3958         nstats->tx_heartbeat_errors = 0;
3959         nstats->tx_window_errors = 0;
3960
3961         nstats->tx_errors = nstats->tx_aborted_errors +
3962                             nstats->tx_carrier_errors +
3963             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3964 }
3965
3966 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3967 {
3968         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3969         int i;
3970
3971         estats->driver_xoff = 0;
3972         estats->rx_err_discard_pkt = 0;
3973         estats->rx_skb_alloc_failed = 0;
3974         estats->hw_csum_err = 0;
3975         for_each_queue(bp, i) {
3976                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3977
3978                 estats->driver_xoff += qstats->driver_xoff;
3979                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3980                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3981                 estats->hw_csum_err += qstats->hw_csum_err;
3982         }
3983 }
3984
3985 static void bnx2x_stats_update(struct bnx2x *bp)
3986 {
3987         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3988
3989         if (*stats_comp != DMAE_COMP_VAL)
3990                 return;
3991
3992         if (bp->port.pmf)
3993                 bnx2x_hw_stats_update(bp);
3994
3995         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3996                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3997                 bnx2x_panic();
3998                 return;
3999         }
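        /*
         * Editor's note: a failed storm update leaves stats_pending set,
         * so the check above fires only after several consecutive timer
         * ticks in which the firmware never refreshed its counters.
         */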
4000
4001         bnx2x_net_stats_update(bp);
4002         bnx2x_drv_stats_update(bp);
4003
4004         if (bp->msglevel & NETIF_MSG_TIMER) {
4005                 struct tstorm_per_client_stats *old_tclient =
4006                                                         &bp->fp->old_tclient;
4007                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4008                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4009                 struct net_device_stats *nstats = &bp->dev->stats;
4010                 int i;
4011
4012                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4013                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4014                                   "  tx pkt (%lx)\n",
4015                        bnx2x_tx_avail(bp->fp),
4016                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4017                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4018                                   "  rx pkt (%lx)\n",
4019                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4020                              bp->fp->rx_comp_cons),
4021                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4022                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4023                                   "brb truncate %u\n",
4024                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4025                        qstats->driver_xoff,
4026                        estats->brb_drop_lo, estats->brb_truncate_lo);
4027                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4028                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4029                         "mac_discard %u  mac_filter_discard %u  "
4030                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4031                         "ttl0_discard %u\n",
4032                        le32_to_cpu(old_tclient->checksum_discard),
4033                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4034                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4035                        estats->mac_discard, estats->mac_filter_discard,
4036                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4037                        le32_to_cpu(old_tclient->ttl0_discard));
4038
4039                 for_each_queue(bp, i) {
4040                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4041                                bnx2x_fp(bp, i, tx_pkt),
4042                                bnx2x_fp(bp, i, rx_pkt),
4043                                bnx2x_fp(bp, i, rx_calls));
4044                 }
4045         }
4046
4047         bnx2x_hw_stats_post(bp);
4048         bnx2x_storm_stats_post(bp);
4049 }
4050
4051 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4052 {
4053         struct dmae_command *dmae;
4054         u32 opcode;
4055         int loader_idx = PMF_DMAE_C(bp);
4056         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4057
4058         bp->executer_idx = 0;
4059
4060         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4061                   DMAE_CMD_C_ENABLE |
4062                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4063 #ifdef __BIG_ENDIAN
4064                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4065 #else
4066                   DMAE_CMD_ENDIANITY_DW_SWAP |
4067 #endif
4068                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4069                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4070
4071         if (bp->port.port_stx) {
4072
4073                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4074                 if (bp->func_stx)
4075                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4076                 else
4077                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4078                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4079                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4080                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4081                 dmae->dst_addr_hi = 0;
4082                 dmae->len = sizeof(struct host_port_stats) >> 2;
4083                 if (bp->func_stx) {
4084                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4085                         dmae->comp_addr_hi = 0;
4086                         dmae->comp_val = 1;
4087                 } else {
4088                         dmae->comp_addr_lo =
4089                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4090                         dmae->comp_addr_hi =
4091                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4092                         dmae->comp_val = DMAE_COMP_VAL;
4093
4094                         *stats_comp = 0;
4095                 }
4096         }
4097
4098         if (bp->func_stx) {
4099
4100                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4102                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4103                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4104                 dmae->dst_addr_lo = bp->func_stx >> 2;
4105                 dmae->dst_addr_hi = 0;
4106                 dmae->len = sizeof(struct host_func_stats) >> 2;
4107                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4108                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4109                 dmae->comp_val = DMAE_COMP_VAL;
4110
4111                 *stats_comp = 0;
4112         }
4113 }
4114
4115 static void bnx2x_stats_stop(struct bnx2x *bp)
4116 {
4117         int update = 0;
4118
4119         bnx2x_stats_comp(bp);
4120
4121         if (bp->port.pmf)
4122                 update = (bnx2x_hw_stats_update(bp) == 0);
4123
4124         update |= (bnx2x_storm_stats_update(bp) == 0);
4125
4126         if (update) {
4127                 bnx2x_net_stats_update(bp);
4128
4129                 if (bp->port.pmf)
4130                         bnx2x_port_stats_stop(bp);
4131
4132                 bnx2x_hw_stats_post(bp);
4133                 bnx2x_stats_comp(bp);
4134         }
4135 }
4136
4137 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4138 {
4139 }
4140
4141 static const struct {
4142         void (*action)(struct bnx2x *bp);
4143         enum bnx2x_stats_state next_state;
4144 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4145 /* state        event   */
4146 {
4147 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4148 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4149 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4150 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4151 },
4152 {
4153 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4154 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4155 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4156 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4157 }
4158 };
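
/* Reading the table: statistics leave DISABLED only on a LINK_UP event
 * (via bnx2x_stats_start) and fall back to it only on a STOP event (via
 * bnx2x_stats_stop); PMF and UPDATE events never change the state, they
 * only trigger the action matching the current state.
 */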
4159
4160 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4161 {
4162         enum bnx2x_stats_state state = bp->stats_state;
4163
4164         bnx2x_stats_stm[state][event].action(bp);
4165         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4166
4167         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4168                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4169                    state, event, bp->stats_state);
4170 }
4171
4172 static void bnx2x_timer(unsigned long data)
4173 {
4174         struct bnx2x *bp = (struct bnx2x *) data;
4175
4176         if (!netif_running(bp->dev))
4177                 return;
4178
4179         if (atomic_read(&bp->intr_sem) != 0)
4180                 goto timer_restart;
4181
4182         if (poll) {
4183                 struct bnx2x_fastpath *fp = &bp->fp[0];
4184
4185                 bnx2x_tx_int(fp);
4186                 bnx2x_rx_int(fp, 1000);
4188         }
4189
4190         if (!BP_NOMCP(bp)) {
4191                 int func = BP_FUNC(bp);
4192                 u32 drv_pulse;
4193                 u32 mcp_pulse;
4194
4195                 ++bp->fw_drv_pulse_wr_seq;
4196                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4197                 /* TBD - add SYSTEM_TIME */
4198                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4199                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4200
4201                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4202                              MCP_PULSE_SEQ_MASK);
4203                 /* The delta between driver pulse and mcp response
4204                  * should be 1 (before mcp response) or 0 (after mcp response)
4205                  */
4206                 if ((drv_pulse != mcp_pulse) &&
4207                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4208                         /* someone lost a heartbeat... */
4209                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4210                                   drv_pulse, mcp_pulse);
4211                 }
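                /* Worked example of the check above, assuming a 15-bit
                 * pulse mask: after the driver sequence wraps,
                 * drv_pulse == 0x0000 and mcp_pulse == 0x7fff still
                 * satisfy drv_pulse == ((mcp_pulse + 1) & mask), so the
                 * wraparound is not misreported as a lost heartbeat.
                 */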
4212         }
4213
4214         if ((bp->state == BNX2X_STATE_OPEN) ||
4215             (bp->state == BNX2X_STATE_DISABLED))
4216                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4217
4218 timer_restart:
4219         mod_timer(&bp->timer, jiffies + bp->current_interval);
4220 }
4221
4222 /* end of Statistics */
4223
4224 /* nic init */
4225
4226 /*
4227  * nic init service functions
4228  */
4229
4230 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4231 {
4232         int port = BP_PORT(bp);
4233
4234         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4235                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4236                         sizeof(struct ustorm_status_block)/4);
4237         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4238                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4239                         sizeof(struct cstorm_status_block)/4);
4240 }
4241
4242 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4243                           dma_addr_t mapping, int sb_id)
4244 {
4245         int port = BP_PORT(bp);
4246         int func = BP_FUNC(bp);
4247         int index;
4248         u64 section;
4249
4250         /* USTORM */
4251         section = ((u64)mapping) + offsetof(struct host_status_block,
4252                                             u_status_block);
4253         sb->u_status_block.status_block_id = sb_id;
4254
4255         REG_WR(bp, BAR_USTRORM_INTMEM +
4256                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4257         REG_WR(bp, BAR_USTRORM_INTMEM +
4258                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4259                U64_HI(section));
4260         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4261                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4262
4263         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4264                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4265                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4266
4267         /* CSTORM */
4268         section = ((u64)mapping) + offsetof(struct host_status_block,
4269                                             c_status_block);
4270         sb->c_status_block.status_block_id = sb_id;
4271
4272         REG_WR(bp, BAR_CSTRORM_INTMEM +
4273                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4274         REG_WR(bp, BAR_CSTRORM_INTMEM +
4275                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4276                U64_HI(section));
4277         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4278                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4279
4280         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4281                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4282                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4283
4284         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4285 }
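
/* Note: the REG_WR16 loops above start every HC index out disabled
 * (writing 1); bnx2x_update_coalesce() later re-enables the RX/TX CQ
 * indices for which non-zero tick values are configured.
 */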
4286
4287 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4288 {
4289         int func = BP_FUNC(bp);
4290
4291         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4292                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4293                         sizeof(struct tstorm_def_status_block)/4);
4294         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4295                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4296                         sizeof(struct ustorm_def_status_block)/4);
4297         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4298                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4299                         sizeof(struct cstorm_def_status_block)/4);
4300         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4301                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4302                         sizeof(struct xstorm_def_status_block)/4);
4303 }
4304
4305 static void bnx2x_init_def_sb(struct bnx2x *bp,
4306                               struct host_def_status_block *def_sb,
4307                               dma_addr_t mapping, int sb_id)
4308 {
4309         int port = BP_PORT(bp);
4310         int func = BP_FUNC(bp);
4311         int index, val, reg_offset;
4312         u64 section;
4313
4314         /* ATTN */
4315         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4316                                             atten_status_block);
4317         def_sb->atten_status_block.status_block_id = sb_id;
4318
4319         bp->attn_state = 0;
4320
4321         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4322                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4323
4324         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4325                 bp->attn_group[index].sig[0] = REG_RD(bp,
4326                                                      reg_offset + 0x10*index);
4327                 bp->attn_group[index].sig[1] = REG_RD(bp,
4328                                                reg_offset + 0x4 + 0x10*index);
4329                 bp->attn_group[index].sig[2] = REG_RD(bp,
4330                                                reg_offset + 0x8 + 0x10*index);
4331                 bp->attn_group[index].sig[3] = REG_RD(bp,
4332                                                reg_offset + 0xc + 0x10*index);
4333         }
4334
4335         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4336                              HC_REG_ATTN_MSG0_ADDR_L);
4337
4338         REG_WR(bp, reg_offset, U64_LO(section));
4339         REG_WR(bp, reg_offset + 4, U64_HI(section));
4340
4341         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4342
4343         val = REG_RD(bp, reg_offset);
4344         val |= sb_id;
4345         REG_WR(bp, reg_offset, val);
4346
4347         /* USTORM */
4348         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4349                                             u_def_status_block);
4350         def_sb->u_def_status_block.status_block_id = sb_id;
4351
4352         REG_WR(bp, BAR_USTRORM_INTMEM +
4353                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4354         REG_WR(bp, BAR_USTRORM_INTMEM +
4355                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4356                U64_HI(section));
4357         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4358                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4359
4360         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4361                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4362                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4363
4364         /* CSTORM */
4365         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4366                                             c_def_status_block);
4367         def_sb->c_def_status_block.status_block_id = sb_id;
4368
4369         REG_WR(bp, BAR_CSTRORM_INTMEM +
4370                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4371         REG_WR(bp, BAR_CSTRORM_INTMEM +
4372                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4373                U64_HI(section));
4374         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4375                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4376
4377         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4378                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4379                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4380
4381         /* TSTORM */
4382         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4383                                             t_def_status_block);
4384         def_sb->t_def_status_block.status_block_id = sb_id;
4385
4386         REG_WR(bp, BAR_TSTRORM_INTMEM +
4387                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4388         REG_WR(bp, BAR_TSTRORM_INTMEM +
4389                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4390                U64_HI(section));
4391         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4392                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4393
4394         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4395                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4396                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4397
4398         /* XSTORM */
4399         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4400                                             x_def_status_block);
4401         def_sb->x_def_status_block.status_block_id = sb_id;
4402
4403         REG_WR(bp, BAR_XSTRORM_INTMEM +
4404                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4405         REG_WR(bp, BAR_XSTRORM_INTMEM +
4406                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4407                U64_HI(section));
4408         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4409                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4410
4411         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4412                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4413                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4414
4415         bp->stats_pending = 0;
4416         bp->set_mac_pending = 0;
4417
4418         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4419 }
4420
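/* Program the HC timeout/disable registers for every queue.  The tick
 * values are divided by 12 before being written, which suggests (an
 * inference from this code, not from documentation) that the hardware
 * counts these timeouts in 12 us units; a tick value of 0 instead
 * disables coalescing on that status-block index.
 */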
4421 static void bnx2x_update_coalesce(struct bnx2x *bp)
4422 {
4423         int port = BP_PORT(bp);
4424         int i;
4425
4426         for_each_queue(bp, i) {
4427                 int sb_id = bp->fp[i].sb_id;
4428
4429                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4430                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4431                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4432                                                     U_SB_ETH_RX_CQ_INDEX),
4433                         bp->rx_ticks/12);
4434                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4435                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4436                                                      U_SB_ETH_RX_CQ_INDEX),
4437                          bp->rx_ticks ? 0 : 1);
4438
4439                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4440                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4441                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4442                                                     C_SB_ETH_TX_CQ_INDEX),
4443                         bp->tx_ticks/12);
4444                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4445                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4446                                                      C_SB_ETH_TX_CQ_INDEX),
4447                          bp->tx_ticks ? 0 : 1);
4448         }
4449 }
4450
4451 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4452                                        struct bnx2x_fastpath *fp, int last)
4453 {
4454         int i;
4455
4456         for (i = 0; i < last; i++) {
4457                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4458                 struct sk_buff *skb = rx_buf->skb;
4459
4460                 if (skb == NULL) {
4461                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4462                         continue;
4463                 }
4464
4465                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4466                         pci_unmap_single(bp->pdev,
4467                                          pci_unmap_addr(rx_buf, mapping),
4468                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4469
4470                 dev_kfree_skb(skb);
4471                 rx_buf->skb = NULL;
4472         }
4473 }
4474
4475 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4476 {
4477         int func = BP_FUNC(bp);
4478         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4479                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4480         u16 ring_prod, cqe_ring_prod;
4481         int i, j;
4482
4483         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4484         DP(NETIF_MSG_IFUP,
4485            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4486
4487         if (bp->flags & TPA_ENABLE_FLAG) {
4488
4489                 for_each_rx_queue(bp, j) {
4490                         struct bnx2x_fastpath *fp = &bp->fp[j];
4491
4492                         for (i = 0; i < max_agg_queues; i++) {
4493                                 fp->tpa_pool[i].skb =
4494                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4495                                 if (!fp->tpa_pool[i].skb) {
4496                                         BNX2X_ERR("Failed to allocate TPA "
4497                                                   "skb pool for queue[%d] - "
4498                                                   "disabling TPA on this "
4499                                                   "queue!\n", j);
4500                                         bnx2x_free_tpa_pool(bp, fp, i);
4501                                         fp->disable_tpa = 1;
4502                                         break;
4503                                 }
4504                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4505                                                    mapping, 0);
4507                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4508                         }
4509                 }
4510         }
4511
4512         for_each_rx_queue(bp, j) {
4513                 struct bnx2x_fastpath *fp = &bp->fp[j];
4514
4515                 fp->rx_bd_cons = 0;
4516                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4517                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4518
4519                 /* "next page" elements initialization */
4520                 /* SGE ring */
4521                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4522                         struct eth_rx_sge *sge;
4523
4524                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4525                         sge->addr_hi =
4526                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4527                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4528                         sge->addr_lo =
4529                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4530                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4531                 }
4532
4533                 bnx2x_init_sge_ring_bit_mask(fp);
4534
4535                 /* RX BD ring */
4536                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4537                         struct eth_rx_bd *rx_bd;
4538
4539                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4540                         rx_bd->addr_hi =
4541                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4542                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4543                         rx_bd->addr_lo =
4544                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4545                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4546                 }
4547
4548                 /* CQ ring */
4549                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4550                         struct eth_rx_cqe_next_page *nextpg;
4551
4552                         nextpg = (struct eth_rx_cqe_next_page *)
4553                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4554                         nextpg->addr_hi =
4555                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4556                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4557                         nextpg->addr_lo =
4558                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4559                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4560                 }
4561
4562                 /* Allocate SGEs and initialize the ring elements */
4563                 for (i = 0, ring_prod = 0;
4564                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4565
4566                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4567                                 BNX2X_ERR("was only able to allocate "
4568                                           "%d rx sges\n", i);
4569                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4570                                 /* Cleanup already allocated elements */
4571                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4572                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4573                                 fp->disable_tpa = 1;
4574                                 ring_prod = 0;
4575                                 break;
4576                         }
4577                         ring_prod = NEXT_SGE_IDX(ring_prod);
4578                 }
4579                 fp->rx_sge_prod = ring_prod;
4580
4581                 /* Allocate BDs and initialize BD ring */
4582                 fp->rx_comp_cons = 0;
4583                 cqe_ring_prod = ring_prod = 0;
4584                 for (i = 0; i < bp->rx_ring_size; i++) {
4585                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4586                                 BNX2X_ERR("was only able to allocate "
4587                                           "%d rx skbs on queue[%d]\n", i, j);
4588                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4589                                 break;
4590                         }
4591                         ring_prod = NEXT_RX_IDX(ring_prod);
4592                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4593                         WARN_ON(ring_prod <= i);
4594                 }
4595
4596                 fp->rx_bd_prod = ring_prod;
4597                 /* must not have more available CQEs than BDs */
4598                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4599                                        cqe_ring_prod);
4600                 fp->rx_pkt = fp->rx_calls = 0;
4601
4602                 /* Warning!
4603                  * This will generate an interrupt (to the TSTORM);
4604                  * it must only be done after the chip is initialized.
4605                  */
4606                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4607                                      fp->rx_sge_prod);
4608                 if (j != 0)
4609                         continue;
4610
4611                 REG_WR(bp, BAR_USTRORM_INTMEM +
4612                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4613                        U64_LO(fp->rx_comp_mapping));
4614                 REG_WR(bp, BAR_USTRORM_INTMEM +
4615                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4616                        U64_HI(fp->rx_comp_mapping));
4617         }
4618 }
4619
4620 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4621 {
4622         int i, j;
4623
4624         for_each_tx_queue(bp, j) {
4625                 struct bnx2x_fastpath *fp = &bp->fp[j];
4626
4627                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4628                         struct eth_tx_bd *tx_bd =
4629                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4630
4631                         tx_bd->addr_hi =
4632                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4633                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4634                         tx_bd->addr_lo =
4635                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4636                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4637                 }
4638
4639                 fp->tx_pkt_prod = 0;
4640                 fp->tx_pkt_cons = 0;
4641                 fp->tx_bd_prod = 0;
4642                 fp->tx_bd_cons = 0;
4643                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4644                 fp->tx_pkt = 0;
4645         }
4646 }
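
/* Note on the ring setup above: in the SGE, RX BD, RCQ and TX BD rings
 * alike, the trailing element(s) of every page are reserved as "next
 * page" pointers rather than data descriptors; each page's trailing
 * element holds the DMA address of the following page and the last page
 * wraps back to the first, so the hardware walks each ring as one
 * circular list.
 */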
4647
4648 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4649 {
4650         int func = BP_FUNC(bp);
4651
4652         spin_lock_init(&bp->spq_lock);
4653
4654         bp->spq_left = MAX_SPQ_PENDING;
4655         bp->spq_prod_idx = 0;
4656         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4657         bp->spq_prod_bd = bp->spq;
4658         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4659
4660         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4661                U64_LO(bp->spq_mapping));
4662         REG_WR(bp,
4663                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4664                U64_HI(bp->spq_mapping));
4665
4666         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4667                bp->spq_prod_idx);
4668 }
4669
4670 static void bnx2x_init_context(struct bnx2x *bp)
4671 {
4672         int i;
4673
4674         for_each_queue(bp, i) {
4675                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4676                 struct bnx2x_fastpath *fp = &bp->fp[i];
4677                 u8 cl_id = fp->cl_id;
4678                 u8 sb_id = fp->sb_id;
4679
4680                 context->ustorm_st_context.common.sb_index_numbers =
4681                                                 BNX2X_RX_SB_INDEX_NUM;
4682                 context->ustorm_st_context.common.clientId = cl_id;
4683                 context->ustorm_st_context.common.status_block_id = sb_id;
4684                 context->ustorm_st_context.common.flags =
4685                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4686                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4687                 context->ustorm_st_context.common.statistics_counter_id =
4688                                                 cl_id;
4689                 context->ustorm_st_context.common.mc_alignment_log_size =
4690                                                 BNX2X_RX_ALIGN_SHIFT;
4691                 context->ustorm_st_context.common.bd_buff_size =
4692                                                 bp->rx_buf_size;
4693                 context->ustorm_st_context.common.bd_page_base_hi =
4694                                                 U64_HI(fp->rx_desc_mapping);
4695                 context->ustorm_st_context.common.bd_page_base_lo =
4696                                                 U64_LO(fp->rx_desc_mapping);
4697                 if (!fp->disable_tpa) {
4698                         context->ustorm_st_context.common.flags |=
4699                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4700                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4701                         context->ustorm_st_context.common.sge_buff_size =
4702                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4703                                          (u32)0xffff);
4704                         context->ustorm_st_context.common.sge_page_base_hi =
4705                                                 U64_HI(fp->rx_sge_mapping);
4706                         context->ustorm_st_context.common.sge_page_base_lo =
4707                                                 U64_LO(fp->rx_sge_mapping);
4708                 }
4709
4710                 context->ustorm_ag_context.cdu_usage =
4711                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4712                                                CDU_REGION_NUMBER_UCM_AG,
4713                                                ETH_CONNECTION_TYPE);
4714
4715                 context->xstorm_st_context.tx_bd_page_base_hi =
4716                                                 U64_HI(fp->tx_desc_mapping);
4717                 context->xstorm_st_context.tx_bd_page_base_lo =
4718                                                 U64_LO(fp->tx_desc_mapping);
4719                 context->xstorm_st_context.db_data_addr_hi =
4720                                                 U64_HI(fp->tx_prods_mapping);
4721                 context->xstorm_st_context.db_data_addr_lo =
4722                                                 U64_LO(fp->tx_prods_mapping);
4723                 context->xstorm_st_context.statistics_data = (cl_id |
4724                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4725                 context->cstorm_st_context.sb_index_number =
4726                                                 C_SB_ETH_TX_CQ_INDEX;
4727                 context->cstorm_st_context.status_block_id = sb_id;
4728
4729                 context->xstorm_ag_context.cdu_reserved =
4730                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4731                                                CDU_REGION_NUMBER_XCM_AG,
4732                                                ETH_CONNECTION_TYPE);
4733         }
4734 }
4735
4736 static void bnx2x_init_ind_table(struct bnx2x *bp)
4737 {
4738         int func = BP_FUNC(bp);
4739         int i;
4740
4741         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4742                 return;
4743
4744         DP(NETIF_MSG_IFUP,
4745            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4746         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4747                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4748                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4749                         bp->fp->cl_id + (i % bp->num_rx_queues));
4750 }
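
/* Worked example: with bp->fp->cl_id == 0 and num_rx_queues == 4, the
 * indirection table entries cycle 0, 1, 2, 3, 0, 1, ... so RSS hash
 * results are spread round-robin over the four RX clients.
 */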
4751
4752 static void bnx2x_set_client_config(struct bnx2x *bp)
4753 {
4754         struct tstorm_eth_client_config tstorm_client = {0};
4755         int port = BP_PORT(bp);
4756         int i;
4757
4758         tstorm_client.mtu = bp->dev->mtu;
4759         tstorm_client.config_flags =
4760                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4761                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4762 #ifdef BCM_VLAN
4763         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4764                 tstorm_client.config_flags |=
4765                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4766                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4767         }
4768 #endif
4769
4770         if (bp->flags & TPA_ENABLE_FLAG) {
4771                 tstorm_client.max_sges_for_packet =
4772                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4773                 tstorm_client.max_sges_for_packet =
4774                         ((tstorm_client.max_sges_for_packet +
4775                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4776                         PAGES_PER_SGE_SHIFT;
4777
4778                 tstorm_client.config_flags |=
4779                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4780         }
4781
4782         for_each_queue(bp, i) {
4783                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4784
4785                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4786                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4787                        ((u32 *)&tstorm_client)[0]);
4788                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4789                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4790                        ((u32 *)&tstorm_client)[1]);
4791         }
4792
4793         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4794            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4795 }
4796
4797 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4798 {
4799         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4800         int mode = bp->rx_mode;
4801         int mask = (1 << BP_L_ID(bp));
4802         int func = BP_FUNC(bp);
4803         int i;
4804
4805         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4806
4807         switch (mode) {
4808         case BNX2X_RX_MODE_NONE: /* no Rx */
4809                 tstorm_mac_filter.ucast_drop_all = mask;
4810                 tstorm_mac_filter.mcast_drop_all = mask;
4811                 tstorm_mac_filter.bcast_drop_all = mask;
4812                 break;
4813
4814         case BNX2X_RX_MODE_NORMAL:
4815                 tstorm_mac_filter.bcast_accept_all = mask;
4816                 break;
4817
4818         case BNX2X_RX_MODE_ALLMULTI:
4819                 tstorm_mac_filter.mcast_accept_all = mask;
4820                 tstorm_mac_filter.bcast_accept_all = mask;
4821                 break;
4822
4823         case BNX2X_RX_MODE_PROMISC:
4824                 tstorm_mac_filter.ucast_accept_all = mask;
4825                 tstorm_mac_filter.mcast_accept_all = mask;
4826                 tstorm_mac_filter.bcast_accept_all = mask;
4827                 break;
4828
4829         default:
4830                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4831                 break;
4832         }
4833
4834         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4835                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4836                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4837                        ((u32 *)&tstorm_mac_filter)[i]);
4838
4839 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4840                    ((u32 *)&tstorm_mac_filter)[i]); */
4841         }
4842
4843         if (mode != BNX2X_RX_MODE_NONE)
4844                 bnx2x_set_client_config(bp);
4845 }
4846
4847 static void bnx2x_init_internal_common(struct bnx2x *bp)
4848 {
4849         int i;
4850
4851         if (bp->flags & TPA_ENABLE_FLAG) {
4852                 struct tstorm_eth_tpa_exist tpa = {0};
4853
4854                 tpa.tpa_exist = 1;
4855
4856                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4857                        ((u32 *)&tpa)[0]);
4858                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4859                        ((u32 *)&tpa)[1]);
4860         }
4861
4862         /* Zero this manually as its initialization is
4863            currently missing in the initTool */
4864         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4865                 REG_WR(bp, BAR_USTRORM_INTMEM +
4866                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4867 }
4868
4869 static void bnx2x_init_internal_port(struct bnx2x *bp)
4870 {
4871         int port = BP_PORT(bp);
4872
4873         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4874         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4875         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4876         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4877 }
4878
4879 /* Calculates the sum of vn_min_rates.
4880    The sum is later used for normalizing the min_rates.
4881    Returns:
4882      the sum of vn_min_rates,
4883        or
4884      0 - if all the min_rates are 0.
4885      In the latter case the fairness algorithm should be deactivated.
4886      If not all min_rates are zero, the zero ones are raised to DEF_MIN_RATE.
4887  */
4888 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4889 {
4890         int all_zero = 1;
4891         int port = BP_PORT(bp);
4892         int vn;
4893
4894         bp->vn_weight_sum = 0;
4895         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4896                 int func = 2*vn + port;
4897                 u32 vn_cfg =
4898                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4899                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4900                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4901
4902                 /* Skip hidden vns */
4903                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4904                         continue;
4905
4906                 /* If min rate is zero - set it to DEF_MIN_RATE */
4907                 if (!vn_min_rate)
4908                         vn_min_rate = DEF_MIN_RATE;
4909                 else
4910                         all_zero = 0;
4911
4912                 bp->vn_weight_sum += vn_min_rate;
4913         }
4914
4915         /* ... only if all min rates are zeros - disable fairness */
4916         if (all_zero)
4917                 bp->vn_weight_sum = 0;
4918 }
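
/* Worked example (illustrative values): min-BW fields of 0, 25, 0 and
 * 40 for the four vns give vn_min_rate values of 0, 2500, 0 and 4000.
 * The two zeros are raised to DEF_MIN_RATE, all_zero ends up 0, and
 * vn_weight_sum is 6500 + 2 * DEF_MIN_RATE, so fairness stays enabled.
 */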
4919
4920 static void bnx2x_init_internal_func(struct bnx2x *bp)
4921 {
4922         struct tstorm_eth_function_common_config tstorm_config = {0};
4923         struct stats_indication_flags stats_flags = {0};
4924         int port = BP_PORT(bp);
4925         int func = BP_FUNC(bp);
4926         int i, j;
4927         u32 offset;
4928         u16 max_agg_size;
4929
4930         if (is_multi(bp)) {
4931                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4932                 tstorm_config.rss_result_mask = MULTI_MASK;
4933         }
4934         if (IS_E1HMF(bp))
4935                 tstorm_config.config_flags |=
4936                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4937
4938         tstorm_config.leading_client_id = BP_L_ID(bp);
4939
4940         REG_WR(bp, BAR_TSTRORM_INTMEM +
4941                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4942                (*(u32 *)&tstorm_config));
4943
4944         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4945         bnx2x_set_storm_rx_mode(bp);
4946
4947         for_each_queue(bp, i) {
4948                 u8 cl_id = bp->fp[i].cl_id;
4949
4950                 /* reset xstorm per client statistics */
4951                 offset = BAR_XSTRORM_INTMEM +
4952                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953                 for (j = 0;
4954                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4955                         REG_WR(bp, offset + j*4, 0);
4956
4957                 /* reset tstorm per client statistics */
4958                 offset = BAR_TSTRORM_INTMEM +
4959                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960                 for (j = 0;
4961                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4962                         REG_WR(bp, offset + j*4, 0);
4963
4964                 /* reset ustorm per client statistics */
4965                 offset = BAR_USTRORM_INTMEM +
4966                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4967                 for (j = 0;
4968                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4969                         REG_WR(bp, offset + j*4, 0);
4970         }
4971
4972         /* Init statistics related context */
4973         stats_flags.collect_eth = 1;
4974
4975         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4976                ((u32 *)&stats_flags)[0]);
4977         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4978                ((u32 *)&stats_flags)[1]);
4979
4980         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4981                ((u32 *)&stats_flags)[0]);
4982         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4983                ((u32 *)&stats_flags)[1]);
4984
4985         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4986                ((u32 *)&stats_flags)[0]);
4987         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4988                ((u32 *)&stats_flags)[1]);
4989
4990         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4991                ((u32 *)&stats_flags)[0]);
4992         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4993                ((u32 *)&stats_flags)[1]);
4994
4995         REG_WR(bp, BAR_XSTRORM_INTMEM +
4996                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998         REG_WR(bp, BAR_XSTRORM_INTMEM +
4999                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002         REG_WR(bp, BAR_TSTRORM_INTMEM +
5003                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005         REG_WR(bp, BAR_TSTRORM_INTMEM +
5006                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
5009         REG_WR(bp, BAR_USTRORM_INTMEM +
5010                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5011                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5012         REG_WR(bp, BAR_USTRORM_INTMEM +
5013                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5014                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5015
5016         if (CHIP_IS_E1H(bp)) {
5017                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5018                         IS_E1HMF(bp));
5019                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5020                         IS_E1HMF(bp));
5021                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5022                         IS_E1HMF(bp));
5023                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5024                         IS_E1HMF(bp));
5025
5026                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5027                          bp->e1hov);
5028         }
5029
5030         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5031         max_agg_size =
5032                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5033                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5034                     (u32)0xffff);
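        /* e.g., assuming 4 KiB SGE pages and PAGES_PER_SGE == 2, the
         * product is 8 * 4096 * 2 = 65536, which does not fit in u16,
         * so the 0xffff clamp applies; with PAGES_PER_SGE == 1 the
         * result 32768 is used as is.
         */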
5035         for_each_rx_queue(bp, i) {
5036                 struct bnx2x_fastpath *fp = &bp->fp[i];
5037
5038                 REG_WR(bp, BAR_USTRORM_INTMEM +
5039                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5040                        U64_LO(fp->rx_comp_mapping));
5041                 REG_WR(bp, BAR_USTRORM_INTMEM +
5042                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5043                        U64_HI(fp->rx_comp_mapping));
5044
5045                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5046                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5047                          max_agg_size);
5048         }
5049
5050         /* dropless flow control */
5051         if (CHIP_IS_E1H(bp)) {
5052                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5053
5054                 rx_pause.bd_thr_low = 250;
5055                 rx_pause.cqe_thr_low = 250;
5056                 rx_pause.cos = 1;
5057                 rx_pause.sge_thr_low = 0;
5058                 rx_pause.bd_thr_high = 350;
5059                 rx_pause.cqe_thr_high = 350;
5060                 rx_pause.sge_thr_high = 0;
5061
5062                 for_each_rx_queue(bp, i) {
5063                         struct bnx2x_fastpath *fp = &bp->fp[i];
5064
5065                         if (!fp->disable_tpa) {
5066                                 rx_pause.sge_thr_low = 150;
5067                                 rx_pause.sge_thr_high = 250;
5068                         }
5069
5071                         offset = BAR_USTRORM_INTMEM +
5072                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5073                                                                    fp->cl_id);
5074                         for (j = 0;
5075                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5076                              j++)
5077                                 REG_WR(bp, offset + j*4,
5078                                        ((u32 *)&rx_pause)[j]);
5079                 }
5080         }
5081
5082         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5083
5084         /* Init rate shaping and fairness contexts */
5085         if (IS_E1HMF(bp)) {
5086                 int vn;
5087
5088                 /* During init there is no active link.
5089                    Until the link is up, set the link rate to 10Gbps */
5090                 bp->link_vars.line_speed = SPEED_10000;
5091                 bnx2x_init_port_minmax(bp);
5092
5093                 bnx2x_calc_vn_weight_sum(bp);
5094
5095                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5096                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5097
5098                 /* Enable rate shaping and fairness */
5099                 bp->cmng.flags.cmng_enables =
5100                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5101                 if (bp->vn_weight_sum)
5102                         bp->cmng.flags.cmng_enables |=
5103                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5104                 else
5105                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5106                            " fairness will be disabled\n");
5107         } else {
5108                 /* rate shaping and fairness are disabled */
5109                 DP(NETIF_MSG_IFUP,
5110                    "single function mode: minmax will be disabled\n");
5111         }
5112
5114         /* Store it to internal memory */
5115         if (bp->port.pmf)
5116                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5117                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5118                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5119                                ((u32 *)(&bp->cmng))[i]);
5120 }
5121
5122 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5123 {
5124         switch (load_code) {
5125         case FW_MSG_CODE_DRV_LOAD_COMMON:
5126                 bnx2x_init_internal_common(bp);
5127                 /* no break */
5128
5129         case FW_MSG_CODE_DRV_LOAD_PORT:
5130                 bnx2x_init_internal_port(bp);
5131                 /* no break */
5132
5133         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5134                 bnx2x_init_internal_func(bp);
5135                 break;
5136
5137         default:
5138                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5139                 break;
5140         }
5141 }
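
/* The deliberate fallthrough above means a DRV_LOAD_COMMON response
 * runs all three init stages, DRV_LOAD_PORT runs the port and function
 * stages, and DRV_LOAD_FUNCTION runs only the per-function stage.
 */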
5142
5143 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5144 {
5145         int i;
5146
5147         for_each_queue(bp, i) {
5148                 struct bnx2x_fastpath *fp = &bp->fp[i];
5149
5150                 fp->bp = bp;
5151                 fp->state = BNX2X_FP_STATE_CLOSED;
5152                 fp->index = i;
5153                 fp->cl_id = BP_L_ID(bp) + i;
5154                 fp->sb_id = fp->cl_id;
5155                 DP(NETIF_MSG_IFUP,
5156                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5157                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5158                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5159                               fp->sb_id);
5160                 bnx2x_update_fpsb_idx(fp);
5161         }
5162
5163         /* ensure status block indices were read */
5164         rmb();
5165
5167         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5168                           DEF_SB_ID);
5169         bnx2x_update_dsb_idx(bp);
5170         bnx2x_update_coalesce(bp);
5171         bnx2x_init_rx_rings(bp);
5172         bnx2x_init_tx_ring(bp);
5173         bnx2x_init_sp_ring(bp);
5174         bnx2x_init_context(bp);
5175         bnx2x_init_internal(bp, load_code);
5176         bnx2x_init_ind_table(bp);
5177         bnx2x_stats_init(bp);
5178
5179         /* At this point, we are ready for interrupts */
5180         atomic_set(&bp->intr_sem, 0);
5181
5182         /* flush all before enabling interrupts */
5183         mb();
5184         mmiowb();
5185
5186         bnx2x_int_enable(bp);
5187 }
5188
5189 /* end of nic init */
5190
5191 /*
5192  * gzip service functions
5193  */
5194
5195 static int bnx2x_gunzip_init(struct bnx2x *bp)
5196 {
5197         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5198                                               &bp->gunzip_mapping);
5199         if (bp->gunzip_buf == NULL)
5200                 goto gunzip_nomem1;
5201
5202         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5203         if (bp->strm == NULL)
5204                 goto gunzip_nomem2;
5205
5206         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5207                                       GFP_KERNEL);
5208         if (bp->strm->workspace == NULL)
5209                 goto gunzip_nomem3;
5210
5211         return 0;
5212
5213 gunzip_nomem3:
5214         kfree(bp->strm);
5215         bp->strm = NULL;
5216
5217 gunzip_nomem2:
5218         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219                             bp->gunzip_mapping);
5220         bp->gunzip_buf = NULL;
5221
5222 gunzip_nomem1:
5223         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5224                " decompression\n", bp->dev->name);
5225         return -ENOMEM;
5226 }
5227
5228 static void bnx2x_gunzip_end(struct bnx2x *bp)
5229 {
5230         kfree(bp->strm->workspace);
5231
5232         kfree(bp->strm);
5233         bp->strm = NULL;
5234
5235         if (bp->gunzip_buf) {
5236                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5237                                     bp->gunzip_mapping);
5238                 bp->gunzip_buf = NULL;
5239         }
5240 }
5241
5242 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5243 {
5244         int n, rc;
5245
5246         /* check gzip header */
5247         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5248                 BNX2X_ERR("Bad gzip header\n");
5249                 return -EINVAL;
5250         }
5251
5252         n = 10;
5253
5254 #define FNAME                           0x8
5255
5256         if (zbuf[3] & FNAME)
5257                 while ((zbuf[n++] != 0) && (n < len));
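
        /* Per RFC 1952 the fixed gzip member header is 10 bytes: the
         * magic bytes 0x1f 0x8b, the compression method (8 == deflate),
         * a flags byte, 4 bytes of MTIME, then XFL and OS.  When the
         * FNAME bit is set in the flags byte a NUL-terminated file name
         * follows, which the loop above skips so that only the raw
         * deflate stream is handed to zlib (hence -MAX_WBITS below).
         */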
5258
5259         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5260         bp->strm->avail_in = len - n;
5261         bp->strm->next_out = bp->gunzip_buf;
5262         bp->strm->avail_out = FW_BUF_SIZE;
5263
5264         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5265         if (rc != Z_OK)
5266                 return rc;
5267
5268         rc = zlib_inflate(bp->strm, Z_FINISH);
5269         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5270                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5271                        bp->dev->name, bp->strm->msg);
5272
5273         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5274         if (bp->gunzip_outlen & 0x3)
5275                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5276                                     " gunzip_outlen (%d) not aligned\n",
5277                        bp->dev->name, bp->gunzip_outlen);
5278         bp->gunzip_outlen >>= 2;
5279
5280         zlib_inflateEnd(bp->strm);
5281
5282         if (rc == Z_STREAM_END)
5283                 return 0;
5284
5285         return rc;
5286 }
5287
5288 /* nic load/unload */
5289
5290 /*
5291  * General service functions
5292  */
5293
5294 /* send a NIG loopback debug packet */
5295 static void bnx2x_lb_pckt(struct bnx2x *bp)
5296 {
5297         u32 wb_write[3];
5298
5299         /* Ethernet source and destination addresses */
5300         wb_write[0] = 0x55555555;
5301         wb_write[1] = 0x55555555;
5302         wb_write[2] = 0x20;             /* SOP */
5303         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5304
5305         /* NON-IP protocol */
5306         wb_write[0] = 0x09000000;
5307         wb_write[1] = 0x55555555;
5308         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5309         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5310 }
5311
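     /* each debug packet built above is 16 (0x10) bytes on the wire;
      * the memory test below relies on that when it expects the NIG
      * octet counter to read 0x10 per packet (0xb0 for 11 packets)
      */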
5312 /* some of the internal memories
5313  * are not directly readable from the driver;
5314  * to test them we send debug packets
5315  */
5316 static int bnx2x_int_mem_test(struct bnx2x *bp)
5317 {
5318         int factor;
5319         int count, i;
5320         u32 val = 0;
5321
5322         if (CHIP_REV_IS_FPGA(bp))
5323                 factor = 120;
5324         else if (CHIP_REV_IS_EMUL(bp))
5325                 factor = 200;
5326         else
5327                 factor = 1;
5328
5329         DP(NETIF_MSG_HW, "start part1\n");
5330
5331         /* Disable inputs of parser neighbor blocks */
5332         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5333         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5334         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5335         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5336
5337         /*  Write 0 to parser credits for CFC search request */
5338         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5339
5340         /* send Ethernet packet */
5341         bnx2x_lb_pckt(bp);
5342
5343         /* TODO: should we reset the NIG statistics here? */
5344         /* Wait until NIG register shows 1 packet of size 0x10 */
5345         count = 1000 * factor;
5346         while (count) {
5347
5348                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5349                 val = *bnx2x_sp(bp, wb_data[0]);
5350                 if (val == 0x10)
5351                         break;
5352
5353                 msleep(10);
5354                 count--;
5355         }
5356         if (val != 0x10) {
5357                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5358                 return -1;
5359         }
5360
5361         /* Wait until PRS register shows 1 packet */
5362         count = 1000 * factor;
5363         while (count) {
5364                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5365                 if (val == 1)
5366                         break;
5367
5368                 msleep(10);
5369                 count--;
5370         }
5371         if (val != 0x1) {
5372                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5373                 return -2;
5374         }
5375
5376         /* Reset and init BRB, PRS */
5377         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5378         msleep(50);
5379         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5380         msleep(50);
5381         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5382         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5383
5384         DP(NETIF_MSG_HW, "part2\n");
5385
5386         /* Disable inputs of parser neighbor blocks */
5387         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5388         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5389         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5390         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5391
5392         /* Write 0 to parser credits for CFC search request */
5393         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5394
5395         /* send 10 Ethernet packets */
5396         for (i = 0; i < 10; i++)
5397                 bnx2x_lb_pckt(bp);
5398
5399         /* Wait until NIG register shows 10 + 1
5400            packets of size 11*0x10 = 0xb0 */
5401         count = 1000 * factor;
5402         while (count) {
5403
5404                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5405                 val = *bnx2x_sp(bp, wb_data[0]);
5406                 if (val == 0xb0)
5407                         break;
5408
5409                 msleep(10);
5410                 count--;
5411         }
5412         if (val != 0xb0) {
5413                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5414                 return -3;
5415         }
5416
5417         /* Wait until PRS register shows 2 packets */
5418         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5419         if (val != 2)
5420                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5421
5422         /* Write 1 to parser credits for CFC search request */
5423         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5424
5425         /* Wait until PRS register shows 3 packets */
5426         msleep(10 * factor);
5427
5428         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5429         if (val != 3)
5430                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5431
5432         /* clear NIG EOP FIFO */
5433         for (i = 0; i < 11; i++)
5434                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5435         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5436         if (val != 1) {
5437                 BNX2X_ERR("clear of NIG failed\n");
5438                 return -4;
5439         }
5440
5441         /* Reset and init BRB, PRS, NIG */
5442         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5443         msleep(50);
5444         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5445         msleep(50);
5446         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5447         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5448 #ifndef BCM_ISCSI
5449         /* set NIC mode */
5450         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5451 #endif
5452
5453         /* Enable inputs of parser neighbor blocks */
5454         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5455         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5456         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5457         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5458
5459         DP(NETIF_MSG_HW, "done\n");
5460
5461         return 0; /* OK */
5462 }
5463
5464 static void enable_blocks_attention(struct bnx2x *bp)
5465 {
5466         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5467         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5468         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5469         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5470         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5471         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5472         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5473         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5474         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5475 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5476 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5477         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5478         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5479         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5480 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5481 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5482         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5483         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5484         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5485         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5486 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5487 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5488         if (CHIP_REV_IS_FPGA(bp))
5489                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5490         else
5491                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5492         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5493         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5494         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5495 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5496 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5497         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5498         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5499 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5500         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5501 }
5502
5503
5504 static void bnx2x_reset_common(struct bnx2x *bp)
5505 {
5506         /* reset_common */
5507         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5508                0xd3ffff7f);
5509         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5510 }
5511
5512 static int bnx2x_init_common(struct bnx2x *bp)
5513 {
5514         u32 val, i;
5515
5516         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5517
5518         bnx2x_reset_common(bp);
5519         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5520         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5521
5522         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5523         if (CHIP_IS_E1H(bp))
5524                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5525
5526         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5527         msleep(30);
5528         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5529
5530         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5531         if (CHIP_IS_E1(bp)) {
5532                 /* enable HW interrupt from PXP on USDM overflow
5533                    bit 16 on INT_MASK_0 */
5534                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5535         }
5536
5537         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5538         bnx2x_init_pxp(bp);
5539
5540 #ifdef __BIG_ENDIAN
5541         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5542         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5543         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5544         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5545         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5546         /* make sure this value is 0 */
5547         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5548
5549 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5550         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5551         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5552         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5553         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5554 #endif
5555
5556         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5557 #ifdef BCM_ISCSI
5558         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5559         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5560         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5561 #endif
5562
5563         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5564                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5565
5566         /* let the HW do its magic ... */
5567         msleep(100);
5568         /* finish PXP init */
5569         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5570         if (val != 1) {
5571                 BNX2X_ERR("PXP2 CFG failed\n");
5572                 return -EBUSY;
5573         }
5574         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5575         if (val != 1) {
5576                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5577                 return -EBUSY;
5578         }
5579
5580         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5581         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5582
5583         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5584
5585         /* clean the DMAE memory */
5586         bp->dmae_ready = 1;
5587         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5588
5589         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5590         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5591         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5592         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5593
5594         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5595         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5596         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5597         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5598
5599         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5600         /* soft reset pulse */
5601         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5602         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5603
5604 #ifdef BCM_ISCSI
5605         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5606 #endif
5607
5608         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5609         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5610         if (!CHIP_REV_IS_SLOW(bp)) {
5611                 /* enable hw interrupt from doorbell Q */
5612                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5613         }
5614
5615         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5616         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5617         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5618         /* set NIC mode */
5619         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5620         if (CHIP_IS_E1H(bp))
5621                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5622
5623         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5624         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5625         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5626         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5627
5628         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5629         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5630         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5631         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5632
5633         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5634         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5635         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5636         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5637
5638         /* sync semi rtc */
5639         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5640                0x80000000);
5641         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5642                0x80000000);
5643
5644         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5645         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5646         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5647
5648         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5649         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5650                 REG_WR(bp, i, 0xc0cac01a);
5651                 /* TODO: replace with something meaningful */
5652         }
5653         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5654         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5655
5656         if (sizeof(union cdu_context) != 1024)
5657                 /* we currently assume that a context is 1024 bytes */
5658                 printk(KERN_ALERT PFX "please adjust the size of"
5659                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5660
5661         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5662         val = (4 << 24) + (0 << 12) + 1024;
5663         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5664         if (CHIP_IS_E1(bp)) {
5665                 /* !!! fix pxp client credit until excel update */
5666                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5667                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5668         }
5669
5670         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5671         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5672         /* enable context validation interrupt from CFC */
5673         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5674
5675         /* set the thresholds to prevent CFC/CDU race */
5676         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5677
5678         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5679         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5680
5681         /* PXPCS COMMON comes here */
5682         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5683         /* Reset PCIE errors for debug */
5684         REG_WR(bp, 0x2814, 0xffffffff);
5685         REG_WR(bp, 0x3820, 0xffffffff);
5686
5687         /* EMAC0 COMMON comes here */
5688         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5689         /* EMAC1 COMMON comes here */
5690         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5691         /* DBU COMMON comes here */
5692         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5693         /* DBG COMMON comes here */
5694         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5695
5696         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5697         if (CHIP_IS_E1H(bp)) {
5698                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5699                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5700         }
5701
5702         if (CHIP_REV_IS_SLOW(bp))
5703                 msleep(200);
5704
5705         /* finish CFC init */
5706         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5707         if (val != 1) {
5708                 BNX2X_ERR("CFC LL_INIT failed\n");
5709                 return -EBUSY;
5710         }
5711         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5712         if (val != 1) {
5713                 BNX2X_ERR("CFC AC_INIT failed\n");
5714                 return -EBUSY;
5715         }
5716         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5717         if (val != 1) {
5718                 BNX2X_ERR("CFC CAM_INIT failed\n");
5719                 return -EBUSY;
5720         }
5721         REG_WR(bp, CFC_REG_DEBUG0, 0);
5722
5723         /* read NIG statistic
5724            to see if this is our first up since powerup */
5725         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5726         val = *bnx2x_sp(bp, wb_data[0]);
5727
5728         /* do internal memory self test */
5729         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5730                 BNX2X_ERR("internal mem self test failed\n");
5731                 return -EBUSY;
5732         }
5733
5734         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5735         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5736         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5737         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5738                 bp->port.need_hw_lock = 1;
5739                 break;
5740
5741         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5742                 /* Fan failure is indicated by SPIO 5 */
5743                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5744                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5745
5746                 /* set to active low mode */
5747                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5748                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5749                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5750                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5751
5752                 /* enable interrupt to signal the IGU */
5753                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5754                 val |= (1 << MISC_REGISTERS_SPIO_5);
5755                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5756                 break;
5757
5758         default:
5759                 break;
5760         }
5761
5762         /* clear PXP2 attentions */
5763         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5764
5765         enable_blocks_attention(bp);
5766
5767         if (!BP_NOMCP(bp)) {
5768                 bnx2x_acquire_phy_lock(bp);
5769                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5770                 bnx2x_release_phy_lock(bp);
5771         } else
5772                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5773
5774         return 0;
5775 }
5776
5777 static int bnx2x_init_port(struct bnx2x *bp)
5778 {
5779         int port = BP_PORT(bp);
5780         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5781         u32 low, high;
5782         u32 val;
5783
5784         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5785
5786         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5787
5788         /* Port PXP comes here */
5789         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5790         /* Port PXP2 comes here */
5791         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5792 #ifdef BCM_ISCSI
5793         /* Port0  1
5794          * Port1  385 */
5795         i++;
5796         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5797         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5798         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5799         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5800
5801         /* Port0  2
5802          * Port1  386 */
5803         i++;
5804         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5805         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5806         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5807         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5808
5809         /* Port0  3
5810          * Port1  387 */
5811         i++;
5812         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5813         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5814         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5815         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5816 #endif
5817         /* Port CMs come here */
5818         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5819
5820         /* Port QM comes here */
5821 #ifdef BCM_ISCSI
5822         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5823         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5824
5825         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5826 #endif
5827         /* Port DQ comes here */
5828         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5829
5830         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5831         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5832                 /* no pause for emulation and FPGA */
5833                 low = 0;
5834                 high = 513;
5835         } else {
5836                 if (IS_E1HMF(bp))
5837                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5838                 else if (bp->dev->mtu > 4096) {
5839                         if (bp->flags & ONE_PORT_FLAG)
5840                                 low = 160;
5841                         else {
5842                                 val = bp->dev->mtu;
5843                                 /* (24*1024 + val*4)/256 */
5844                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5845                         }
5846                 } else
5847                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5848                 high = low + 56;        /* 14*1024/256 */
5849         }
5850         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5851         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
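             /* worked example of the formula above: the thresholds are in
              * 256 byte units, so 24*1024/256 = 96 and val*4/256 is
              * val/64 rounded up; an mtu of 9000 therefore gives
              * low = 96 + 141 = 237 and high = 237 + 56 = 293
              */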
5852
5853
5854         /* Port PRS comes here */
5855         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5856         /* Port TSDM comes here */
5857         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5858         /* Port CSDM comes here */
5859         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5860         /* Port USDM comes here */
5861         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5862         /* Port XSDM comes here */
5863         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5864
5865         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5866         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5867         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5868         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5869
5870         /* Port UPB comes here */
5871         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5872         /* Port XPB comes here */
5873         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5874
5875         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5876
5877         /* configure PBF to work without PAUSE for an MTU of 9000 */
5878         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5879
5880         /* update threshold */
5881         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5882         /* update init credit */
5883         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
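             /* the threshold and credit appear to be in 16 byte units
              * (9040 = 9000 byte frame + overhead); the "+ 553 - 22"
              * term is inherited as-is and is presumably the internal
              * PBF buffer size minus a reserve
              */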
5884
5885         /* probe changes */
5886         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5887         msleep(5);
5888         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5889
5890 #ifdef BCM_ISCSI
5891         /* tell the searcher where the T2 table is */
5892         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5893
5894         wb_write[0] = U64_LO(bp->t2_mapping);
5895         wb_write[1] = U64_HI(bp->t2_mapping);
5896         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5897         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5898         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5899         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5900
5901         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5902         /* Port SRCH comes here */
5903 #endif
5904         /* Port CDU comes here */
5905         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5906         /* Port CFC comes here */
5907         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5908
5909         if (CHIP_IS_E1(bp)) {
5910                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5911                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5912         }
5913         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5914
5915         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5916         /* init aeu_mask_attn_func_0/1:
5917          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5918          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5919          *             bits 4-7 are used for "per vn group attention" */
5920         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5921                (IS_E1HMF(bp) ? 0xF7 : 0x7));
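             /* i.e. 0x7 unmasks bits 0-2 only, while 0xF7 also unmasks
              * bits 4-7 for the per vn group attentions, keeping bit 3
              * masked in both modes as described above
              */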
5922
5923         /* Port PXPCS comes here */
5924         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5925         /* Port EMAC0 comes here */
5926         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5927         /* Port EMAC1 comes here */
5928         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5929         /* Port DBU comes here */
5930         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5931         /* Port DBG comes here */
5932         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5933
5934         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5935
5936         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5937
5938         if (CHIP_IS_E1H(bp)) {
5939                 /* 0x2 disable e1hov, 0x1 enable */
5940                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5941                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5942
5943                 /* support pause requests from USDM, TSDM and BRB */
5944                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5945
5946                 {
5947                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5948                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5949                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5950                 }
5951         }
5952
5953         /* Port MCP comes here */
5954         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5955         /* Port DMAE comes here */
5956         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5957
5958         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5959         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5960                 {
5961                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5962
5963                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5964                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5965
5966                 /* The GPIO should be swapped if the swap register is
5967                    set and active */
5968                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5969                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5970
5971                 /* Select function upon port-swap configuration */
5972                 if (port == 0) {
5973                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5974                         aeu_gpio_mask = (swap_val && swap_override) ?
5975                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5976                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5977                 } else {
5978                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5979                         aeu_gpio_mask = (swap_val && swap_override) ?
5980                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5981                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5982                 }
5983                 val = REG_RD(bp, offset);
5984                 /* add GPIO3 to group */
5985                 val |= aeu_gpio_mask;
5986                 REG_WR(bp, offset, val);
5987                 }
5988                 break;
5989
5990         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5991                 /* add SPIO 5 to group 0 */
5992                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5993                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5994                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5995                 break;
5996
5997         default:
5998                 break;
5999         }
6000
6001         bnx2x__link_reset(bp);
6002
6003         return 0;
6004 }
6005
6006 #define ILT_PER_FUNC            (768/2)
6007 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6008 /* the phys address is shifted right 12 bits and has a
6009    1=valid bit added at the 53rd bit;
6010    then, since this is a wide register(TM),
6011    we split it into two 32 bit writes
6012  */
6013 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6014 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6015 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6016 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
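     /* e.g. for a (hypothetical) physical address 0x12345678000:
      * ONCHIP_ADDR1 = (0x12345678000 >> 12) = 0x12345678 and
      * ONCHIP_ADDR2 = (1 << 20) | (0x12345678000 >> 44) = 0x100000,
      * the valid bit landing at bit 20 of the high word, i.e. the
      * 53rd bit of the combined value
      */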
6017
6018 #define CNIC_ILT_LINES          0
6019
6020 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6021 {
6022         int reg;
6023
6024         if (CHIP_IS_E1H(bp))
6025                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6026         else /* E1 */
6027                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6028
6029         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6030 }
6031
6032 static int bnx2x_init_func(struct bnx2x *bp)
6033 {
6034         int port = BP_PORT(bp);
6035         int func = BP_FUNC(bp);
6036         u32 addr, val;
6037         int i;
6038
6039         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6040
6041         /* set MSI reconfigure capability */
6042         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6043         val = REG_RD(bp, addr);
6044         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6045         REG_WR(bp, addr, val);
6046
6047         i = FUNC_ILT_BASE(func);
6048
6049         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6050         if (CHIP_IS_E1H(bp)) {
6051                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6052                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6053         } else /* E1 */
6054                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6055                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6056
6057
6058         if (CHIP_IS_E1H(bp)) {
6059                 for (i = 0; i < 9; i++)
6060                         bnx2x_init_block(bp,
6061                                          cm_blocks[i], FUNC0_STAGE + func);
6062
6063                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6064                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6065         }
6066
6067         /* HC init per function */
6068         if (CHIP_IS_E1H(bp)) {
6069                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6070
6071                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6072                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6073         }
6074         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6075
6076         /* Reset PCIE errors for debug */
6077         REG_WR(bp, 0x2114, 0xffffffff);
6078         REG_WR(bp, 0x2120, 0xffffffff);
6079
6080         return 0;
6081 }
6082
6083 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6084 {
6085         int i, rc = 0;
6086
6087         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6088            BP_FUNC(bp), load_code);
6089
6090         bp->dmae_ready = 0;
6091         mutex_init(&bp->dmae_mutex);
6092         bnx2x_gunzip_init(bp);
6093
6094         switch (load_code) {
6095         case FW_MSG_CODE_DRV_LOAD_COMMON:
6096                 rc = bnx2x_init_common(bp);
6097                 if (rc)
6098                         goto init_hw_err;
6099                 /* no break */
6100
6101         case FW_MSG_CODE_DRV_LOAD_PORT:
6102                 bp->dmae_ready = 1;
6103                 rc = bnx2x_init_port(bp);
6104                 if (rc)
6105                         goto init_hw_err;
6106                 /* no break */
6107
6108         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6109                 bp->dmae_ready = 1;
6110                 rc = bnx2x_init_func(bp);
6111                 if (rc)
6112                         goto init_hw_err;
6113                 break;
6114
6115         default:
6116                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6117                 break;
6118         }
6119
6120         if (!BP_NOMCP(bp)) {
6121                 int func = BP_FUNC(bp);
6122
6123                 bp->fw_drv_pulse_wr_seq =
6124                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6125                                  DRV_PULSE_SEQ_MASK);
6126                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6127                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6128                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6129         } else
6130                 bp->func_stx = 0;
6131
6132         /* this needs to be done before gunzip end */
6133         bnx2x_zero_def_sb(bp);
6134         for_each_queue(bp, i)
6135                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6136
6137 init_hw_err:
6138         bnx2x_gunzip_end(bp);
6139
6140         return rc;
6141 }
6142
6143 /* send the MCP a request, block until there is a reply */
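     /* the handshake below: the driver writes (command | seq) to
      * drv_mb_header, where seq is a rolling counter in the
      * FW_MSG_SEQ_NUMBER_MASK bits, then polls fw_mb_header until the
      * FW echoes that seq; the reply code is extracted with
      * FW_MSG_CODE_MASK
      */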
6144 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6145 {
6146         int func = BP_FUNC(bp);
6147         u32 seq = ++bp->fw_seq;
6148         u32 rc = 0;
6149         u32 cnt = 1;
6150         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6151
6152         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6153         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6154
6155         do {
6156                 /* let the FW do its magic ... */
6157                 msleep(delay);
6158
6159                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6160
6161                 /* Give the FW up to 2 seconds (200*10ms) */
6162         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6163
6164         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6165            cnt*delay, rc, seq);
6166
6167         /* is this a reply to our command? */
6168         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6169                 rc &= FW_MSG_CODE_MASK;
6170
6171         } else {
6172                 /* FW BUG! */
6173                 BNX2X_ERR("FW failed to respond!\n");
6174                 bnx2x_fw_dump(bp);
6175                 rc = 0;
6176         }
6177
6178         return rc;
6179 }
6180
6181 static void bnx2x_free_mem(struct bnx2x *bp)
6182 {
6183
6184 #define BNX2X_PCI_FREE(x, y, size) \
6185         do { \
6186                 if (x) { \
6187                         pci_free_consistent(bp->pdev, size, x, y); \
6188                         x = NULL; \
6189                         y = 0; \
6190                 } \
6191         } while (0)
6192
6193 #define BNX2X_FREE(x) \
6194         do { \
6195                 if (x) { \
6196                         vfree(x); \
6197                         x = NULL; \
6198                 } \
6199         } while (0)
6200
6201         int i;
6202
6203         /* fastpath */
6204         /* Common */
6205         for_each_queue(bp, i) {
6206
6207                 /* status blocks */
6208                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6209                                bnx2x_fp(bp, i, status_blk_mapping),
6210                                sizeof(struct host_status_block) +
6211                                sizeof(struct eth_tx_db_data));
6212         }
6213         /* Rx */
6214         for_each_rx_queue(bp, i) {
6215
6216                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6217                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6218                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6219                                bnx2x_fp(bp, i, rx_desc_mapping),
6220                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6221
6222                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6223                                bnx2x_fp(bp, i, rx_comp_mapping),
6224                                sizeof(struct eth_fast_path_rx_cqe) *
6225                                NUM_RCQ_BD);
6226
6227                 /* SGE ring */
6228                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6229                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6230                                bnx2x_fp(bp, i, rx_sge_mapping),
6231                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6232         }
6233         /* Tx */
6234         for_each_tx_queue(bp, i) {
6235
6236                 /* fastpath tx rings: tx_buf tx_desc */
6237                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6238                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6239                                bnx2x_fp(bp, i, tx_desc_mapping),
6240                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6241         }
6242         /* end of fastpath */
6243
6244         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6245                        sizeof(struct host_def_status_block));
6246
6247         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6248                        sizeof(struct bnx2x_slowpath));
6249
6250 #ifdef BCM_ISCSI
6251         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6252         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6253         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6254         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6255 #endif
6256         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6257
6258 #undef BNX2X_PCI_FREE
6259 #undef BNX2X_FREE
6260 }
6261
6262 static int bnx2x_alloc_mem(struct bnx2x *bp)
6263 {
6264
6265 #define BNX2X_PCI_ALLOC(x, y, size) \
6266         do { \
6267                 x = pci_alloc_consistent(bp->pdev, size, y); \
6268                 if (x == NULL) \
6269                         goto alloc_mem_err; \
6270                 memset(x, 0, size); \
6271         } while (0)
6272
6273 #define BNX2X_ALLOC(x, size) \
6274         do { \
6275                 x = vmalloc(size); \
6276                 if (x == NULL) \
6277                         goto alloc_mem_err; \
6278                 memset(x, 0, size); \
6279         } while (0)
6280
6281         int i;
6282
6283         /* fastpath */
6284         /* Common */
6285         for_each_queue(bp, i) {
6286                 bnx2x_fp(bp, i, bp) = bp;
6287
6288                 /* status blocks */
6289                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6290                                 &bnx2x_fp(bp, i, status_blk_mapping),
6291                                 sizeof(struct host_status_block) +
6292                                 sizeof(struct eth_tx_db_data));
6293         }
6294         /* Rx */
6295         for_each_rx_queue(bp, i) {
6296
6297                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6298                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6299                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6300                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6301                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6302                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6303
6304                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6305                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6306                                 sizeof(struct eth_fast_path_rx_cqe) *
6307                                 NUM_RCQ_BD);
6308
6309                 /* SGE ring */
6310                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6311                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6312                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6313                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6314                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6315         }
6316         /* Tx */
6317         for_each_tx_queue(bp, i) {
6318
6319                 bnx2x_fp(bp, i, hw_tx_prods) =
6320                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6321
6322                 bnx2x_fp(bp, i, tx_prods_mapping) =
6323                                 bnx2x_fp(bp, i, status_blk_mapping) +
6324                                 sizeof(struct host_status_block);
6325
6326                 /* fastpath tx rings: tx_buf tx_desc */
6327                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6328                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6329                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6330                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6331                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6332         }
6333         /* end of fastpath */
6334
6335         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6336                         sizeof(struct host_def_status_block));
6337
6338         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6339                         sizeof(struct bnx2x_slowpath));
6340
6341 #ifdef BCM_ISCSI
6342         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6343
6344         /* Initialize T1 */
6345         for (i = 0; i < 64*1024; i += 64) {
6346                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6347                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6348         }
6349
6350         /* allocate searcher T2 table
6351            we allocate 1/4 of alloc num for T2
6352           (which is not entered into the ILT) */
6353         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6354
6355         /* Initialize T2 */
6356         for (i = 0; i < 16*1024; i += 64)
6357                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6358
6359         /* now fixup the last line in the block to point to the next block */
6360         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6361
6362         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6363         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6364
6365         /* QM queues (128*MAX_CONN) */
6366         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6367 #endif
6368
6369         /* Slow path ring */
6370         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6371
6372         return 0;
6373
6374 alloc_mem_err:
6375         bnx2x_free_mem(bp);
6376         return -ENOMEM;
6377
6378 #undef BNX2X_PCI_ALLOC
6379 #undef BNX2X_ALLOC
6380 }
6381
6382 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6383 {
6384         int i;
6385
6386         for_each_tx_queue(bp, i) {
6387                 struct bnx2x_fastpath *fp = &bp->fp[i];
6388
6389                 u16 bd_cons = fp->tx_bd_cons;
6390                 u16 sw_prod = fp->tx_pkt_prod;
6391                 u16 sw_cons = fp->tx_pkt_cons;
6392
6393                 while (sw_cons != sw_prod) {
6394                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6395                         sw_cons++;
6396                 }
6397         }
6398 }
6399
6400 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6401 {
6402         int i, j;
6403
6404         for_each_rx_queue(bp, j) {
6405                 struct bnx2x_fastpath *fp = &bp->fp[j];
6406
6407                 for (i = 0; i < NUM_RX_BD; i++) {
6408                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6409                         struct sk_buff *skb = rx_buf->skb;
6410
6411                         if (skb == NULL)
6412                                 continue;
6413
6414                         pci_unmap_single(bp->pdev,
6415                                          pci_unmap_addr(rx_buf, mapping),
6416                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6417
6418                         rx_buf->skb = NULL;
6419                         dev_kfree_skb(skb);
6420                 }
6421                 if (!fp->disable_tpa)
6422                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6423                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6424                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6425         }
6426 }
6427
6428 static void bnx2x_free_skbs(struct bnx2x *bp)
6429 {
6430         bnx2x_free_tx_skbs(bp);
6431         bnx2x_free_rx_skbs(bp);
6432 }
6433
6434 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6435 {
6436         int i, offset = 1;
6437
6438         free_irq(bp->msix_table[0].vector, bp->dev);
6439         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6440            bp->msix_table[0].vector);
6441
6442         for_each_queue(bp, i) {
6443                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6444                    "state %x\n", i, bp->msix_table[i + offset].vector,
6445                    bnx2x_fp(bp, i, state));
6446
6447                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6448         }
6449 }
6450
6451 static void bnx2x_free_irq(struct bnx2x *bp)
6452 {
6453         if (bp->flags & USING_MSIX_FLAG) {
6454                 bnx2x_free_msix_irqs(bp);
6455                 pci_disable_msix(bp->pdev);
6456                 bp->flags &= ~USING_MSIX_FLAG;
6457
6458         } else if (bp->flags & USING_MSI_FLAG) {
6459                 free_irq(bp->pdev->irq, bp->dev);
6460                 pci_disable_msi(bp->pdev);
6461                 bp->flags &= ~USING_MSI_FLAG;
6462
6463         } else
6464                 free_irq(bp->pdev->irq, bp->dev);
6465 }
6466
6467 static int bnx2x_enable_msix(struct bnx2x *bp)
6468 {
6469         int i, rc, offset = 1;
6470         int igu_vec = 0;
6471
6472         bp->msix_table[0].entry = igu_vec;
6473         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6474
6475         for_each_queue(bp, i) {
6476                 igu_vec = BP_L_ID(bp) + offset + i;
6477                 bp->msix_table[i + offset].entry = igu_vec;
6478                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6479                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6480         }
6481
6482         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6483                              BNX2X_NUM_QUEUES(bp) + offset);
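             /* note: pci_enable_msix() of this vintage returns 0 on
              * success, a negative errno on failure, or a positive count
              * of vectors it could have allocated instead; any non-zero
              * value is treated as failure here so the caller falls back
              * to MSI or INTx
              */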
6484         if (rc) {
6485                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6486                 return rc;
6487         }
6488
6489         bp->flags |= USING_MSIX_FLAG;
6490
6491         return 0;
6492 }
6493
6494 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6495 {
6496         int i, rc, offset = 1;
6497
6498         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6499                          bp->dev->name, bp->dev);
6500         if (rc) {
6501                 BNX2X_ERR("request sp irq failed\n");
6502                 return -EBUSY;
6503         }
6504
6505         for_each_queue(bp, i) {
6506                 struct bnx2x_fastpath *fp = &bp->fp[i];
6507
6508                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6509                 rc = request_irq(bp->msix_table[i + offset].vector,
6510                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6511                 if (rc) {
6512                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6513                         bnx2x_free_msix_irqs(bp);
6514                         return -EBUSY;
6515                 }
6516
6517                 fp->state = BNX2X_FP_STATE_IRQ;
6518         }
6519
6520         i = BNX2X_NUM_QUEUES(bp);
6521         if (is_multi(bp))
6522                 printk(KERN_INFO PFX
6523                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6524                        bp->dev->name, bp->msix_table[0].vector,
6525                        bp->msix_table[offset].vector,
6526                        bp->msix_table[offset + i - 1].vector);
6527         else
6528                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6529                        bp->dev->name, bp->msix_table[0].vector,
6530                        bp->msix_table[offset + i - 1].vector);
6531
6532         return 0;
6533 }
6534
6535 static int bnx2x_enable_msi(struct bnx2x *bp)
6536 {
6537         int rc;
6538
6539         rc = pci_enable_msi(bp->pdev);
6540         if (rc) {
6541                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6542                 return -1;
6543         }
6544         bp->flags |= USING_MSI_FLAG;
6545
6546         return 0;
6547 }
6548
6549 static int bnx2x_req_irq(struct bnx2x *bp)
6550 {
6551         unsigned long flags;
6552         int rc;
6553
6554         if (bp->flags & USING_MSI_FLAG)
6555                 flags = 0;
6556         else
6557                 flags = IRQF_SHARED;
6558
6559         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6560                          bp->dev->name, bp->dev);
6561         if (!rc)
6562                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6563
6564         return rc;
6565 }
6566
6567 static void bnx2x_napi_enable(struct bnx2x *bp)
6568 {
6569         int i;
6570
6571         for_each_rx_queue(bp, i)
6572                 napi_enable(&bnx2x_fp(bp, i, napi));
6573 }
6574
6575 static void bnx2x_napi_disable(struct bnx2x *bp)
6576 {
6577         int i;
6578
6579         for_each_rx_queue(bp, i)
6580                 napi_disable(&bnx2x_fp(bp, i, napi));
6581 }
6582
6583 static void bnx2x_netif_start(struct bnx2x *bp)
6584 {
6585         if (atomic_dec_and_test(&bp->intr_sem)) {
6586                 if (netif_running(bp->dev)) {
6587                         bnx2x_napi_enable(bp);
6588                         bnx2x_int_enable(bp);
6589                         if (bp->state == BNX2X_STATE_OPEN)
6590                                 netif_tx_wake_all_queues(bp->dev);
6591                 }
6592         }
6593 }
6594
6595 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6596 {
6597         bnx2x_int_disable_sync(bp, disable_hw);
6598         bnx2x_napi_disable(bp);
6599         netif_tx_disable(bp->dev);
6600         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6601 }
6602
6603 /*
6604  * Init service functions
6605  */
6606
6607 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6608 {
6609         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6610         int port = BP_PORT(bp);
6611
6612         /* CAM allocation
6613          * unicasts 0-31:port0 32-63:port1
6614          * multicast 64-127:port0 128-191:port1
6615          */
6616         config->hdr.length = 2;
6617         config->hdr.offset = port ? 32 : 0;
6618         config->hdr.client_id = bp->fp->cl_id;
6619         config->hdr.reserved1 = 0;
6620
6621         /* primary MAC */
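             /* on a little-endian host each swab16() below yields the
              * address octets in big-endian order; e.g. a hypothetical
              * MAC 00:11:22:33:44:55 becomes msb 0x0011,
              * middle 0x2233 and lsb 0x4455
              */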
6622         config->config_table[0].cam_entry.msb_mac_addr =
6623                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6624         config->config_table[0].cam_entry.middle_mac_addr =
6625                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6626         config->config_table[0].cam_entry.lsb_mac_addr =
6627                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6628         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6629         if (set)
6630                 config->config_table[0].target_table_entry.flags = 0;
6631         else
6632                 CAM_INVALIDATE(config->config_table[0]);
6633         config->config_table[0].target_table_entry.client_id = 0;
6634         config->config_table[0].target_table_entry.vlan_id = 0;
6635
6636         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6637            (set ? "setting" : "clearing"),
6638            config->config_table[0].cam_entry.msb_mac_addr,
6639            config->config_table[0].cam_entry.middle_mac_addr,
6640            config->config_table[0].cam_entry.lsb_mac_addr);
6641
6642         /* broadcast */
6643         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6644         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6645         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6646         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6647         if (set)
6648                 config->config_table[1].target_table_entry.flags =
6649                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6650         else
6651                 CAM_INVALIDATE(config->config_table[1]);
6652         config->config_table[1].target_table_entry.client_id = 0;
6653         config->config_table[1].target_table_entry.vlan_id = 0;
6654
6655         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6656                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6657                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6658 }
6659
6660 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6661 {
6662         struct mac_configuration_cmd_e1h *config =
6663                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6664
6665         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6666                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6667                 return;
6668         }
6669
6670         /* CAM allocation for E1H
6671          * unicasts: by func number
6672          * multicast: 20+FUNC*20, 20 each
6673          */
6674         config->hdr.length = 1;
6675         config->hdr.offset = BP_FUNC(bp);
6676         config->hdr.client_id = bp->fp->cl_id;
6677         config->hdr.reserved1 = 0;
6678
6679         /* primary MAC */
6680         config->config_table[0].msb_mac_addr =
6681                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6682         config->config_table[0].middle_mac_addr =
6683                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6684         config->config_table[0].lsb_mac_addr =
6685                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6686         config->config_table[0].client_id = BP_L_ID(bp);
6687         config->config_table[0].vlan_id = 0;
6688         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6689         if (set)
6690                 config->config_table[0].flags = BP_PORT(bp);
6691         else
6692                 config->config_table[0].flags =
6693                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6694
6695         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6696            (set ? "setting" : "clearing"),
6697            config->config_table[0].msb_mac_addr,
6698            config->config_table[0].middle_mac_addr,
6699            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6700
6701         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6702                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6703                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6704 }
6705
6706 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6707                              int *state_p, int poll)
6708 {
6709         /* can take a while if any port is running */
6710         int cnt = 5000;
6711
6712         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6713            poll ? "polling" : "waiting", state, idx);
6714
6715         might_sleep();
6716         while (cnt--) {
6717                 if (poll) {
6718                         bnx2x_rx_int(bp->fp, 10);
6719                         /* if the index is different from 0,
6720                          * the reply for some commands will
6721                          * arrive on the non-default queue
6722                          */
6723                         if (idx)
6724                                 bnx2x_rx_int(&bp->fp[idx], 10);
6725                 }
6726
6727                 mb(); /* state is changed by bnx2x_sp_event() */
6728                 if (*state_p == state) {
6729 #ifdef BNX2X_STOP_ON_ERROR
6730                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6731 #endif
6732                         return 0;
6733                 }
6734
6735                 msleep(1);
6736         }
6737
6738         /* timeout! */
6739         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6740                   poll ? "polling" : "waiting", state, idx);
6741 #ifdef BNX2X_STOP_ON_ERROR
6742         bnx2x_panic();
6743 #endif
6744
6745         return -EBUSY;
6746 }
6747
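/* A minimal sketch of the post-and-wait pattern used throughout this file.
 * The helper below is hypothetical and for illustration only (hence the
 * #if 0); it mirrors what bnx2x_stop_leading() does with the real helpers:
 */
#if 0
static int bnx2x_example_post_and_wait(struct bnx2x *bp)
{
        /* 1. mark the state transition we expect bnx2x_sp_event() to make */
        bp->fp[0].state = BNX2X_FP_STATE_HALTING;

        /* 2. post the slowpath ramrod */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

        /* 3. poll until the state field is flipped (or time out) */
        return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
                                 &bp->fp[0].state, 1);
}
#endif
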
6748 static int bnx2x_setup_leading(struct bnx2x *bp)
6749 {
6750         int rc;
6751
6752         /* reset IGU state */
6753         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6754
6755         /* SETUP ramrod */
6756         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6757
6758         /* Wait for completion */
6759         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6760
6761         return rc;
6762 }
6763
6764 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6765 {
6766         struct bnx2x_fastpath *fp = &bp->fp[index];
6767
6768         /* reset IGU state */
6769         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6770
6771         /* SETUP ramrod */
6772         fp->state = BNX2X_FP_STATE_OPENING;
6773         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6774                       fp->cl_id, 0);
6775
6776         /* Wait for completion */
6777         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6778                                  &(fp->state), 0);
6779 }
6780
6781 static int bnx2x_poll(struct napi_struct *napi, int budget);
6782
6783 static void bnx2x_set_int_mode(struct bnx2x *bp)
6784 {
6785         int num_queues;
6786
6787         switch (int_mode) {
6788         case INT_MODE_INTx:
6789         case INT_MODE_MSI:
6790                 num_queues = 1;
6791                 bp->num_rx_queues = num_queues;
6792                 bp->num_tx_queues = num_queues;
6793                 DP(NETIF_MSG_IFUP,
6794                    "set number of queues to %d\n", num_queues);
6795                 break;
6796
6797         case INT_MODE_MSIX:
6798         default:
6799                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6800                         num_queues = min_t(u32, num_online_cpus(),
6801                                            BNX2X_MAX_QUEUES(bp));
6802                 else
6803                         num_queues = 1;
6804                 bp->num_rx_queues = num_queues;
6805                 bp->num_tx_queues = num_queues;
6806                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6807                    "  number of tx queues to %d\n",
6808                    bp->num_rx_queues, bp->num_tx_queues);
6809                 /* if we can't use MSI-X we only need one fastpath,
6810                  * so try to enable MSI-X with the requested number of
6811                  * fastpaths and fall back to MSI or legacy INTx with one
6812                  */
6813                 if (bnx2x_enable_msix(bp)) {
6814                         /* failed to enable MSI-X */
6815                         num_queues = 1;
6816                         bp->num_rx_queues = num_queues;
6817                         bp->num_tx_queues = num_queues;
6818                         if (bp->multi_mode)
6819                                 BNX2X_ERR("Multi requested but failed to "
6820                                           "enable MSI-X  set number of "
6821                                           "queues to %d\n", num_queues);
6822                 }
6823                 break;
6824         }
6825         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6826 }
6827
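/* Summary of the queue-count decision above: int_mode 1 (INT#x) or 2 (MSI)
 * forces a single rx/tx queue; the MSI-X default uses
 * min(num_online_cpus(), BNX2X_MAX_QUEUES(bp)) queues when RSS is enabled
 * (one otherwise), and drops back to a single queue if MSI-X cannot be
 * enabled.
 */
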
6828 static void bnx2x_set_rx_mode(struct net_device *dev);
6829
6830 /* must be called with rtnl_lock */
6831 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6832 {
6833         u32 load_code;
6834         int i, rc = 0;
6835 #ifdef BNX2X_STOP_ON_ERROR
6836         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6837         if (unlikely(bp->panic))
6838                 return -EPERM;
6839 #endif
6840
6841         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6842
6843         bnx2x_set_int_mode(bp);
6844
6845         if (bnx2x_alloc_mem(bp))
6846                 return -ENOMEM;
6847
6848         for_each_rx_queue(bp, i)
6849                 bnx2x_fp(bp, i, disable_tpa) =
6850                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6851
6852         for_each_rx_queue(bp, i)
6853                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6854                                bnx2x_poll, 128);
6855
6856 #ifdef BNX2X_STOP_ON_ERROR
6857         for_each_rx_queue(bp, i) {
6858                 struct bnx2x_fastpath *fp = &bp->fp[i];
6859
6860                 fp->poll_no_work = 0;
6861                 fp->poll_calls = 0;
6862                 fp->poll_max_calls = 0;
6863                 fp->poll_complete = 0;
6864                 fp->poll_exit = 0;
6865         }
6866 #endif
6867         bnx2x_napi_enable(bp);
6868
6869         if (bp->flags & USING_MSIX_FLAG) {
6870                 rc = bnx2x_req_msix_irqs(bp);
6871                 if (rc) {
6872                         pci_disable_msix(bp->pdev);
6873                         goto load_error1;
6874                 }
6875         } else {
6876                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6877                         bnx2x_enable_msi(bp);
6878                 bnx2x_ack_int(bp);
6879                 rc = bnx2x_req_irq(bp);
6880                 if (rc) {
6881                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6882                         if (bp->flags & USING_MSI_FLAG)
6883                                 pci_disable_msi(bp->pdev);
6884                         goto load_error1;
6885                 }
6886                 if (bp->flags & USING_MSI_FLAG) {
6887                         bp->dev->irq = bp->pdev->irq;
6888                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6889                                bp->dev->name, bp->pdev->irq);
6890                 }
6891         }
6892
6893         /* Send LOAD_REQUEST command to the MCP.
6894            The MCP replies with the type of LOAD command to perform:
6895            if this is the first port to be initialized,
6896            the common blocks must be initialized as well; otherwise not.
6897         */
6898         if (!BP_NOMCP(bp)) {
6899                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6900                 if (!load_code) {
6901                         BNX2X_ERR("MCP response failure, aborting\n");
6902                         rc = -EBUSY;
6903                         goto load_error2;
6904                 }
6905                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6906                         rc = -EBUSY; /* other port in diagnostic mode */
6907                         goto load_error2;
6908                 }
6909
6910         } else {
6911                 int port = BP_PORT(bp);
6912
6913                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
6914                    load_count[0], load_count[1], load_count[2]);
6915                 load_count[0]++;
6916                 load_count[1 + port]++;
6917                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
6918                    load_count[0], load_count[1], load_count[2]);
6919                 if (load_count[0] == 1)
6920                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6921                 else if (load_count[1 + port] == 1)
6922                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6923                 else
6924                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6925         }
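
        /* load_count recap (driver-local bookkeeping when there is no MCP):
         * load_count[0] counts loads on the whole device and
         * load_count[1 + port] per port; the first load overall performs
         * COMMON init, the first load on a port performs PORT init, and any
         * other load performs FUNCTION init only.
         */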
6926
6927         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6928             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6929                 bp->port.pmf = 1;
6930         else
6931                 bp->port.pmf = 0;
6932         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6933
6934         /* Initialize HW */
6935         rc = bnx2x_init_hw(bp, load_code);
6936         if (rc) {
6937                 BNX2X_ERR("HW init failed, aborting\n");
6938                 goto load_error2;
6939         }
6940
6941         /* Setup NIC internals and enable interrupts */
6942         bnx2x_nic_init(bp, load_code);
6943
6944         /* Send LOAD_DONE command to MCP */
6945         if (!BP_NOMCP(bp)) {
6946                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6947                 if (!load_code) {
6948                         BNX2X_ERR("MCP response failure, aborting\n");
6949                         rc = -EBUSY;
6950                         goto load_error3;
6951                 }
6952         }
6953
6954         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6955
6956         rc = bnx2x_setup_leading(bp);
6957         if (rc) {
6958                 BNX2X_ERR("Setup leading failed!\n");
6959                 goto load_error3;
6960         }
6961
6962         if (CHIP_IS_E1H(bp))
6963                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6964                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6965                         bp->state = BNX2X_STATE_DISABLED;
6966                 }
6967
6968         if (bp->state == BNX2X_STATE_OPEN)
6969                 for_each_nondefault_queue(bp, i) {
6970                         rc = bnx2x_setup_multi(bp, i);
6971                         if (rc)
6972                                 goto load_error3;
6973                 }
6974
6975         if (CHIP_IS_E1(bp))
6976                 bnx2x_set_mac_addr_e1(bp, 1);
6977         else
6978                 bnx2x_set_mac_addr_e1h(bp, 1);
6979
6980         if (bp->port.pmf)
6981                 bnx2x_initial_phy_init(bp, load_mode);
6982
6983         /* Start fast path */
6984         switch (load_mode) {
6985         case LOAD_NORMAL:
6986                 /* Tx queues should only be re-enabled */
6987                 netif_tx_wake_all_queues(bp->dev);
6988                 /* Initialize the receive filter. */
6989                 bnx2x_set_rx_mode(bp->dev);
6990                 break;
6991
6992         case LOAD_OPEN:
6993                 netif_tx_start_all_queues(bp->dev);
6994                 /* Initialize the receive filter. */
6995                 bnx2x_set_rx_mode(bp->dev);
6996                 break;
6997
6998         case LOAD_DIAG:
6999                 /* Initialize the receive filter. */
7000                 bnx2x_set_rx_mode(bp->dev);
7001                 bp->state = BNX2X_STATE_DIAG;
7002                 break;
7003
7004         default:
7005                 break;
7006         }
7007
7008         if (!bp->port.pmf)
7009                 bnx2x__link_status_update(bp);
7010
7011         /* start the timer */
7012         mod_timer(&bp->timer, jiffies + bp->current_interval);
7013
7014
7015         return 0;
7016
7017 load_error3:
7018         bnx2x_int_disable_sync(bp, 1);
7019         if (!BP_NOMCP(bp)) {
7020                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7021                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7022         }
7023         bp->port.pmf = 0;
7024         /* Free SKBs, SGEs, TPA pool and driver internals */
7025         bnx2x_free_skbs(bp);
7026         for_each_rx_queue(bp, i)
7027                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7028 load_error2:
7029         /* Release IRQs */
7030         bnx2x_free_irq(bp);
7031 load_error1:
7032         bnx2x_napi_disable(bp);
7033         for_each_rx_queue(bp, i)
7034                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7035         bnx2x_free_mem(bp);
7036
7037         return rc;
7038 }
7039
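/* Note on the load_error labels above: each label falls through to the next,
 * so a failure at any stage of bnx2x_nic_load() releases everything acquired
 * before it (MCP/interrupt state and SKBs, then IRQs, then NAPI and driver
 * memory).
 */
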
7040 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7041 {
7042         struct bnx2x_fastpath *fp = &bp->fp[index];
7043         int rc;
7044
7045         /* halt the connection */
7046         fp->state = BNX2X_FP_STATE_HALTING;
7047         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7048
7049         /* Wait for completion */
7050         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7051                                &(fp->state), 1);
7052         if (rc) /* timeout */
7053                 return rc;
7054
7055         /* delete cfc entry */
7056         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7057
7058         /* Wait for completion */
7059         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7060                                &(fp->state), 1);
7061         return rc;
7062 }
7063
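/* Tearing down a client, as above, is a two-step handshake: the HALT ramrod
 * stops the connection and its completion is polled, then CFC_DEL removes
 * the connection's CFC context; only after the second completion is the
 * fastpath marked CLOSED.
 */
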
7064 static int bnx2x_stop_leading(struct bnx2x *bp)
7065 {
7066         __le16 dsb_sp_prod_idx;
7067         /* if the other port is handling traffic,
7068            this can take a lot of time */
7069         int cnt = 500;
7070         int rc;
7071
7072         might_sleep();
7073
7074         /* Send HALT ramrod */
7075         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7076         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7077
7078         /* Wait for completion */
7079         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7080                                &(bp->fp[0].state), 1);
7081         if (rc) /* timeout */
7082                 return rc;
7083
7084         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7085
7086         /* Send PORT_DELETE ramrod */
7087         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7088
7089         /* Wait for the completion to arrive on the default status block.
7090            We are going to reset the chip anyway,
7091            so there is not much to do if this times out.
7092          */
7093         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7094                 if (!cnt) {
7095                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7096                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7097                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7098 #ifdef BNX2X_STOP_ON_ERROR
7099                         bnx2x_panic();
7100 #endif
7101                         rc = -EBUSY;
7102                         break;
7103                 }
7104                 cnt--;
7105                 msleep(1);
7106                 rmb(); /* Refresh the dsb_sp_prod */
7107         }
7108         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7109         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7110
7111         return rc;
7112 }
7113
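/* Note on the PORT_DEL wait above: unlike bnx2x_wait_ramrod(), which polls
 * a state field updated by bnx2x_sp_event(), the PORT_DEL completion is
 * detected by watching the default status block's slowpath producer
 * (*bp->dsb_sp_prod) change from its snapshot.
 */
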
7114 static void bnx2x_reset_func(struct bnx2x *bp)
7115 {
7116         int port = BP_PORT(bp);
7117         int func = BP_FUNC(bp);
7118         int base, i;
7119
7120         /* Configure IGU */
7121         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7122         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7123
7124         /* Clear ILT */
7125         base = FUNC_ILT_BASE(func);
7126         for (i = base; i < base + ILT_PER_FUNC; i++)
7127                 bnx2x_ilt_wr(bp, i, 0);
7128 }
7129
7130 static void bnx2x_reset_port(struct bnx2x *bp)
7131 {
7132         int port = BP_PORT(bp);
7133         u32 val;
7134
7135         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7136
7137         /* Do not rcv packets to BRB */
7138         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7139         /* Do not direct rcv packets that are not for MCP to the BRB */
7140         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7141                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7142
7143         /* Configure AEU */
7144         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7145
7146         msleep(100);
7147         /* Check for BRB port occupancy */
7148         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7149         if (val)
7150                 DP(NETIF_MSG_IFDOWN,
7151                    "BRB1 is not empty  %d blocks are occupied\n", val);
7152
7153         /* TODO: Close Doorbell port? */
7154 }
7155
7156 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7157 {
7158         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7159            BP_FUNC(bp), reset_code);
7160
7161         switch (reset_code) {
7162         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7163                 bnx2x_reset_port(bp);
7164                 bnx2x_reset_func(bp);
7165                 bnx2x_reset_common(bp);
7166                 break;
7167
7168         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7169                 bnx2x_reset_port(bp);
7170                 bnx2x_reset_func(bp);
7171                 break;
7172
7173         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7174                 bnx2x_reset_func(bp);
7175                 break;
7176
7177         default:
7178                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7179                 break;
7180         }
7181 }
7182
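/* The unload reset codes above form a strict hierarchy: FUNCTION resets only
 * this function, PORT additionally resets the port, and COMMON additionally
 * resets the blocks shared by both ports.
 */
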
7183 /* must be called with rtnl_lock */
7184 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7185 {
7186         int port = BP_PORT(bp);
7187         u32 reset_code = 0;
7188         int i, cnt, rc;
7189
7190         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7191
7192         bp->rx_mode = BNX2X_RX_MODE_NONE;
7193         bnx2x_set_storm_rx_mode(bp);
7194
7195         bnx2x_netif_stop(bp, 1);
7196
7197         del_timer_sync(&bp->timer);
7198         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7199                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7200         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7201
7202         /* Release IRQs */
7203         bnx2x_free_irq(bp);
7204
7205         /* Wait until tx fastpath tasks complete */
7206         for_each_tx_queue(bp, i) {
7207                 struct bnx2x_fastpath *fp = &bp->fp[i];
7208
7209                 cnt = 1000;
7210                 while (bnx2x_has_tx_work_unload(fp)) {
7211
7212                         bnx2x_tx_int(fp);
7213                         if (!cnt) {
7214                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7215                                           i);
7216 #ifdef BNX2X_STOP_ON_ERROR
7217                                 bnx2x_panic();
7218                                 return -EBUSY;
7219 #else
7220                                 break;
7221 #endif
7222                         }
7223                         cnt--;
7224                         msleep(1);
7225                 }
7226         }
7227         /* Give HW time to discard old tx messages */
7228         msleep(1);
7229
7230         if (CHIP_IS_E1(bp)) {
7231                 struct mac_configuration_cmd *config =
7232                                                 bnx2x_sp(bp, mcast_config);
7233
7234                 bnx2x_set_mac_addr_e1(bp, 0);
7235
7236                 for (i = 0; i < config->hdr.length; i++)
7237                         CAM_INVALIDATE(config->config_table[i]);
7238
7239                 config->hdr.length = i;
7240                 if (CHIP_REV_IS_SLOW(bp))
7241                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7242                 else
7243                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7244                 config->hdr.client_id = bp->fp->cl_id;
7245                 config->hdr.reserved1 = 0;
7246
7247                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7248                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7249                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7250
7251         } else { /* E1H */
7252                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7253
7254                 bnx2x_set_mac_addr_e1h(bp, 0);
7255
7256                 for (i = 0; i < MC_HASH_SIZE; i++)
7257                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7258         }
7259
7260         if (unload_mode == UNLOAD_NORMAL)
7261                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7262
7263         else if (bp->flags & NO_WOL_FLAG) {
7264                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7265                 if (CHIP_IS_E1H(bp))
7266                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7267
7268         } else if (bp->wol) {
7269                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7270                 u8 *mac_addr = bp->dev->dev_addr;
7271                 u32 val;
7272                 /* The MAC address is written to entries 1-4 to
7273                    preserve entry 0, which is used by the PMF */
7274                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7275
7276                 val = (mac_addr[0] << 8) | mac_addr[1];
7277                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7278
7279                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7280                       (mac_addr[4] << 8) | mac_addr[5];
7281                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7282
7283                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7284
7285         } else
7286                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7287
7288         /* Close the multi and leading connections.
7289            Completions for the ramrods are collected synchronously */
7290         for_each_nondefault_queue(bp, i)
7291                 if (bnx2x_stop_multi(bp, i))
7292                         goto unload_error;
7293
7294         rc = bnx2x_stop_leading(bp);
7295         if (rc) {
7296                 BNX2X_ERR("Stop leading failed!\n");
7297 #ifdef BNX2X_STOP_ON_ERROR
7298                 return -EBUSY;
7299 #else
7300                 goto unload_error;
7301 #endif
7302         }
7303
7304 unload_error:
7305         if (!BP_NOMCP(bp))
7306                 reset_code = bnx2x_fw_command(bp, reset_code);
7307         else {
7308                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7309                    load_count[0], load_count[1], load_count[2]);
7310                 load_count[0]--;
7311                 load_count[1 + port]--;
7312                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7313                    load_count[0], load_count[1], load_count[2]);
7314                 if (load_count[0] == 0)
7315                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7316                 else if (load_count[1 + port] == 0)
7317                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7318                 else
7319                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7320         }
7321
7322         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7323             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7324                 bnx2x__link_reset(bp);
7325
7326         /* Reset the chip */
7327         bnx2x_reset_chip(bp, reset_code);
7328
7329         /* Report UNLOAD_DONE to MCP */
7330         if (!BP_NOMCP(bp))
7331                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7332
7333         bp->port.pmf = 0;
7334
7335         /* Free SKBs, SGEs, TPA pool and driver internals */
7336         bnx2x_free_skbs(bp);
7337         for_each_rx_queue(bp, i)
7338                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7339         for_each_rx_queue(bp, i)
7340                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7341         bnx2x_free_mem(bp);
7342
7343         bp->state = BNX2X_STATE_CLOSED;
7344
7345         netif_carrier_off(bp->dev);
7346
7347         return 0;
7348 }
7349
7350 static void bnx2x_reset_task(struct work_struct *work)
7351 {
7352         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7353
7354 #ifdef BNX2X_STOP_ON_ERROR
7355         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7356                   " so reset not done to allow debug dump,\n"
7357          KERN_ERR " you will need to reboot when done\n");
7358         return;
7359 #endif
7360
7361         rtnl_lock();
7362
7363         if (!netif_running(bp->dev))
7364                 goto reset_task_exit;
7365
7366         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7367         bnx2x_nic_load(bp, LOAD_NORMAL);
7368
7369 reset_task_exit:
7370         rtnl_unlock();
7371 }
7372
7373 /* end of nic load/unload */
7374
7375 /* ethtool_ops */
7376
7377 /*
7378  * Init service functions
7379  */
7380
7381 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7382 {
7383         switch (func) {
7384         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7385         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7386         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7387         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7388         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7389         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7390         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7391         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7392         default:
7393                 BNX2X_ERR("Unsupported function index: %d\n", func);
7394                 return (u32)(-1);
7395         }
7396 }
7397
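/* The "pretend" register selected above makes subsequent GRC accesses from
 * this PCI function appear to come from another function: writing 0 lets an
 * E1H function temporarily act as function 0 for the shared interrupt
 * disable sequence, after which the original function value is restored.
 */
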
7398 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7399 {
7400         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7401
7402         /* Flush all outstanding writes */
7403         mmiowb();
7404
7405         /* Pretend to be function 0 */
7406         REG_WR(bp, reg, 0);
7407         /* Flush the GRC transaction (in the chip) */
7408         new_val = REG_RD(bp, reg);
7409         if (new_val != 0) {
7410                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7411                           new_val);
7412                 BUG();
7413         }
7414
7415         /* From now we are in the "like-E1" mode */
7416         bnx2x_int_disable(bp);
7417
7418         /* Flush all outstanding writes */
7419         mmiowb();
7420
7421         /* Restore the original function settings */
7422         REG_WR(bp, reg, orig_func);
7423         new_val = REG_RD(bp, reg);
7424         if (new_val != orig_func) {
7425                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7426                           orig_func, new_val);
7427                 BUG();
7428         }
7429 }
7430
7431 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7432 {
7433         if (CHIP_IS_E1H(bp))
7434                 bnx2x_undi_int_disable_e1h(bp, func);
7435         else
7436                 bnx2x_int_disable(bp);
7437 }
7438
7439 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7440 {
7441         u32 val;
7442
7443         /* Check if there is any driver already loaded */
7444         val = REG_RD(bp, MISC_REG_UNPREPARED);
7445         if (val == 0x1) {
7446                 /* Check if it is the UNDI driver;
7447                  * UNDI initializes the CID offset for the normal doorbell to 0x7
7448                  */
7449                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7450                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7451                 if (val == 0x7) {
7452                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7453                         /* save our func */
7454                         int func = BP_FUNC(bp);
7455                         u32 swap_en;
7456                         u32 swap_val;
7457
7458                         /* clear the UNDI indication */
7459                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7460
7461                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7462
7463                         /* try unload UNDI on port 0 */
7464                         bp->func = 0;
7465                         bp->fw_seq =
7466                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7467                                 DRV_MSG_SEQ_NUMBER_MASK);
7468                         reset_code = bnx2x_fw_command(bp, reset_code);
7469
7470                         /* if UNDI is loaded on the other port */
7471                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7472
7473                                 /* send "DONE" for previous unload */
7474                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7475
7476                                 /* unload UNDI on port 1 */
7477                                 bp->func = 1;
7478                                 bp->fw_seq =
7479                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7480                                         DRV_MSG_SEQ_NUMBER_MASK);
7481                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7482
7483                                 bnx2x_fw_command(bp, reset_code);
7484                         }
7485
7486                         /* now it's safe to release the lock */
7487                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7488
7489                         bnx2x_undi_int_disable(bp, func);
7490
7491                         /* close input traffic and wait for it */
7492                         /* Do not rcv packets to BRB */
7493                         REG_WR(bp,
7494                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7495                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7496                         /* Do not direct rcv packets that are not for MCP to
7497                          * the BRB */
7498                         REG_WR(bp,
7499                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7500                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7501                         /* clear AEU */
7502                         REG_WR(bp,
7503                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7504                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7505                         msleep(10);
7506
7507                         /* save NIG port swap info */
7508                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7509                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7510                         /* reset device */
7511                         REG_WR(bp,
7512                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7513                                0xd3ffffff);
7514                         REG_WR(bp,
7515                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7516                                0x1403);
7517                         /* take the NIG out of reset and restore swap values */
7518                         REG_WR(bp,
7519                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7520                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7521                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7522                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7523
7524                         /* send unload done to the MCP */
7525                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7526
7527                         /* restore our func and fw_seq */
7528                         bp->func = func;
7529                         bp->fw_seq =
7530                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7531                                 DRV_MSG_SEQ_NUMBER_MASK);
7532
7533                 } else
7534                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7535         }
7536 }
7537
7538 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7539 {
7540         u32 val, val2, val3, val4, id;
7541         u16 pmc;
7542
7543         /* Get the chip revision id and number. */
7544         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7545         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7546         id = ((val & 0xffff) << 16);
7547         val = REG_RD(bp, MISC_REG_CHIP_REV);
7548         id |= ((val & 0xf) << 12);
7549         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7550         id |= ((val & 0xff) << 4);
7551         val = REG_RD(bp, MISC_REG_BOND_ID);
7552         id |= (val & 0xf);
7553         bp->common.chip_id = id;
7554         bp->link_params.chip_id = bp->common.chip_id;
7555         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7556
7557         val = (REG_RD(bp, 0x2874) & 0x55);
7558         if ((bp->common.chip_id & 0x1) ||
7559             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7560                 bp->flags |= ONE_PORT_FLAG;
7561                 BNX2X_DEV_INFO("single port device\n");
7562         }
7563
7564         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7565         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7566                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7567         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7568                        bp->common.flash_size, bp->common.flash_size);
7569
7570         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7571         bp->link_params.shmem_base = bp->common.shmem_base;
7572         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7573
7574         if (!bp->common.shmem_base ||
7575             (bp->common.shmem_base < 0xA0000) ||
7576             (bp->common.shmem_base >= 0xC0000)) {
7577                 BNX2X_DEV_INFO("MCP not active\n");
7578                 bp->flags |= NO_MCP_FLAG;
7579                 return;
7580         }
7581
7582         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7583         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7584                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7585                 BNX2X_ERR("BAD MCP validity signature\n");
7586
7587         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7588         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7589
7590         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7591                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7592                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7593
7594         bp->link_params.feature_config_flags = 0;
7595         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7596         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7597                 bp->link_params.feature_config_flags |=
7598                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7599         else
7600                 bp->link_params.feature_config_flags &=
7601                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7602
7603         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7604         bp->common.bc_ver = val;
7605         BNX2X_DEV_INFO("bc_ver %X\n", val);
7606         if (val < BNX2X_BC_VER) {
7607                 /* for now only warn;
7608                  * later we might need to enforce this */
7609                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7610                           " please upgrade BC\n", BNX2X_BC_VER, val);
7611         }
7612
7613         if (BP_E1HVN(bp) == 0) {
7614                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7615                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7616         } else {
7617                 /* no WOL capability for E1HVN != 0 */
7618                 bp->flags |= NO_WOL_FLAG;
7619         }
7620         BNX2X_DEV_INFO("%sWoL capable\n",
7621                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7622
7623         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7624         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7625         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7626         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7627
7628         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7629                val, val2, val3, val4);
7630 }
7631
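/* A minimal sketch (hypothetical helper, for illustration only, hence the
 * #if 0) showing how the chip_id assembled above decodes back into the
 * fields named in the comment there:
 */
#if 0
static void bnx2x_example_decode_chip_id(u32 id)
{
        u16 chip_num = id >> 16;           /* bits 16-31 */
        u8 rev = (id >> 12) & 0xf;         /* bits 12-15 */
        u8 metal = (id >> 4) & 0xff;       /* bits 4-11  */
        u8 bond_id = id & 0xf;             /* bits 0-3   */

        printk(KERN_DEBUG "chip 0x%x rev 0x%x metal 0x%x bond 0x%x\n",
               chip_num, rev, metal, bond_id);
}
#endif
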
7632 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7633                                                     u32 switch_cfg)
7634 {
7635         int port = BP_PORT(bp);
7636         u32 ext_phy_type;
7637
7638         switch (switch_cfg) {
7639         case SWITCH_CFG_1G:
7640                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7641
7642                 ext_phy_type =
7643                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7644                 switch (ext_phy_type) {
7645                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7646                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7647                                        ext_phy_type);
7648
7649                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7650                                                SUPPORTED_10baseT_Full |
7651                                                SUPPORTED_100baseT_Half |
7652                                                SUPPORTED_100baseT_Full |
7653                                                SUPPORTED_1000baseT_Full |
7654                                                SUPPORTED_2500baseX_Full |
7655                                                SUPPORTED_TP |
7656                                                SUPPORTED_FIBRE |
7657                                                SUPPORTED_Autoneg |
7658                                                SUPPORTED_Pause |
7659                                                SUPPORTED_Asym_Pause);
7660                         break;
7661
7662                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7663                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7664                                        ext_phy_type);
7665
7666                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7667                                                SUPPORTED_10baseT_Full |
7668                                                SUPPORTED_100baseT_Half |
7669                                                SUPPORTED_100baseT_Full |
7670                                                SUPPORTED_1000baseT_Full |
7671                                                SUPPORTED_TP |
7672                                                SUPPORTED_FIBRE |
7673                                                SUPPORTED_Autoneg |
7674                                                SUPPORTED_Pause |
7675                                                SUPPORTED_Asym_Pause);
7676                         break;
7677
7678                 default:
7679                         BNX2X_ERR("NVRAM config error. "
7680                                   "BAD SerDes ext_phy_config 0x%x\n",
7681                                   bp->link_params.ext_phy_config);
7682                         return;
7683                 }
7684
7685                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7686                                            port*0x10);
7687                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7688                 break;
7689
7690         case SWITCH_CFG_10G:
7691                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7692
7693                 ext_phy_type =
7694                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7695                 switch (ext_phy_type) {
7696                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7697                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7698                                        ext_phy_type);
7699
7700                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7701                                                SUPPORTED_10baseT_Full |
7702                                                SUPPORTED_100baseT_Half |
7703                                                SUPPORTED_100baseT_Full |
7704                                                SUPPORTED_1000baseT_Full |
7705                                                SUPPORTED_2500baseX_Full |
7706                                                SUPPORTED_10000baseT_Full |
7707                                                SUPPORTED_TP |
7708                                                SUPPORTED_FIBRE |
7709                                                SUPPORTED_Autoneg |
7710                                                SUPPORTED_Pause |
7711                                                SUPPORTED_Asym_Pause);
7712                         break;
7713
7714                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7715                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7716                                        ext_phy_type);
7717
7718                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7719                                                SUPPORTED_1000baseT_Full |
7720                                                SUPPORTED_FIBRE |
7721                                                SUPPORTED_Autoneg |
7722                                                SUPPORTED_Pause |
7723                                                SUPPORTED_Asym_Pause);
7724                         break;
7725
7726                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7727                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7728                                        ext_phy_type);
7729
7730                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7731                                                SUPPORTED_2500baseX_Full |
7732                                                SUPPORTED_1000baseT_Full |
7733                                                SUPPORTED_FIBRE |
7734                                                SUPPORTED_Autoneg |
7735                                                SUPPORTED_Pause |
7736                                                SUPPORTED_Asym_Pause);
7737                         break;
7738
7739                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7740                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7741                                        ext_phy_type);
7742
7743                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7744                                                SUPPORTED_FIBRE |
7745                                                SUPPORTED_Pause |
7746                                                SUPPORTED_Asym_Pause);
7747                         break;
7748
7749                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7750                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7751                                        ext_phy_type);
7752
7753                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7754                                                SUPPORTED_1000baseT_Full |
7755                                                SUPPORTED_FIBRE |
7756                                                SUPPORTED_Pause |
7757                                                SUPPORTED_Asym_Pause);
7758                         break;
7759
7760                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7761                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7762                                        ext_phy_type);
7763
7764                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7765                                                SUPPORTED_1000baseT_Full |
7766                                                SUPPORTED_Autoneg |
7767                                                SUPPORTED_FIBRE |
7768                                                SUPPORTED_Pause |
7769                                                SUPPORTED_Asym_Pause);
7770                         break;
7771
7772                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7773                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7774                                        ext_phy_type);
7775
7776                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7777                                                SUPPORTED_TP |
7778                                                SUPPORTED_Autoneg |
7779                                                SUPPORTED_Pause |
7780                                                SUPPORTED_Asym_Pause);
7781                         break;
7782
7783                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7784                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7785                                        ext_phy_type);
7786
7787                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7788                                                SUPPORTED_10baseT_Full |
7789                                                SUPPORTED_100baseT_Half |
7790                                                SUPPORTED_100baseT_Full |
7791                                                SUPPORTED_1000baseT_Full |
7792                                                SUPPORTED_10000baseT_Full |
7793                                                SUPPORTED_TP |
7794                                                SUPPORTED_Autoneg |
7795                                                SUPPORTED_Pause |
7796                                                SUPPORTED_Asym_Pause);
7797                         break;
7798
7799                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7800                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7801                                   bp->link_params.ext_phy_config);
7802                         break;
7803
7804                 default:
7805                         BNX2X_ERR("NVRAM config error. "
7806                                   "BAD XGXS ext_phy_config 0x%x\n",
7807                                   bp->link_params.ext_phy_config);
7808                         return;
7809                 }
7810
7811                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7812                                            port*0x18);
7813                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7814
7815                 break;
7816
7817         default:
7818                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7819                           bp->port.link_config);
7820                 return;
7821         }
7822         bp->link_params.phy_addr = bp->port.phy_addr;
7823
7824         /* mask what we support according to speed_cap_mask */
7825         if (!(bp->link_params.speed_cap_mask &
7826                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7827                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7828
7829         if (!(bp->link_params.speed_cap_mask &
7830                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7831                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7832
7833         if (!(bp->link_params.speed_cap_mask &
7834                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7835                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7836
7837         if (!(bp->link_params.speed_cap_mask &
7838                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7839                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7840
7841         if (!(bp->link_params.speed_cap_mask &
7842                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7843                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7844                                         SUPPORTED_1000baseT_Full);
7845
7846         if (!(bp->link_params.speed_cap_mask &
7847                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7848                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7849
7850         if (!(bp->link_params.speed_cap_mask &
7851                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7852                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7853
7854         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7855 }
7856
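/* Worked example for the masking above (illustrative values): a PHY that
 * reports SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full, paired with
 * an NVRAM speed_cap_mask lacking the D0_10G bit, ends up supporting only
 * 1000baseT_Full, since the 10G capability is masked out.
 */
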
7857 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7858 {
7859         bp->link_params.req_duplex = DUPLEX_FULL;
7860
7861         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7862         case PORT_FEATURE_LINK_SPEED_AUTO:
7863                 if (bp->port.supported & SUPPORTED_Autoneg) {
7864                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7865                         bp->port.advertising = bp->port.supported;
7866                 } else {
7867                         u32 ext_phy_type =
7868                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7869
7870                         if ((ext_phy_type ==
7871                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7872                             (ext_phy_type ==
7873                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7874                                 /* force 10G, no AN */
7875                                 bp->link_params.req_line_speed = SPEED_10000;
7876                                 bp->port.advertising =
7877                                                 (ADVERTISED_10000baseT_Full |
7878                                                  ADVERTISED_FIBRE);
7879                                 break;
7880                         }
7881                         BNX2X_ERR("NVRAM config error. "
7882                                   "Invalid link_config 0x%x"
7883                                   "  Autoneg not supported\n",
7884                                   bp->port.link_config);
7885                         return;
7886                 }
7887                 break;
7888
7889         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7890                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7891                         bp->link_params.req_line_speed = SPEED_10;
7892                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7893                                                 ADVERTISED_TP);
7894                 } else {
7895                         BNX2X_ERR("NVRAM config error. "
7896                                   "Invalid link_config 0x%x"
7897                                   "  speed_cap_mask 0x%x\n",
7898                                   bp->port.link_config,
7899                                   bp->link_params.speed_cap_mask);
7900                         return;
7901                 }
7902                 break;
7903
7904         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7905                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7906                         bp->link_params.req_line_speed = SPEED_10;
7907                         bp->link_params.req_duplex = DUPLEX_HALF;
7908                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7909                                                 ADVERTISED_TP);
7910                 } else {
7911                         BNX2X_ERR("NVRAM config error. "
7912                                   "Invalid link_config 0x%x"
7913                                   "  speed_cap_mask 0x%x\n",
7914                                   bp->port.link_config,
7915                                   bp->link_params.speed_cap_mask);
7916                         return;
7917                 }
7918                 break;
7919
7920         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7921                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7922                         bp->link_params.req_line_speed = SPEED_100;
7923                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7924                                                 ADVERTISED_TP);
7925                 } else {
7926                         BNX2X_ERR("NVRAM config error. "
7927                                   "Invalid link_config 0x%x"
7928                                   "  speed_cap_mask 0x%x\n",
7929                                   bp->port.link_config,
7930                                   bp->link_params.speed_cap_mask);
7931                         return;
7932                 }
7933                 break;
7934
7935         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7936                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7937                         bp->link_params.req_line_speed = SPEED_100;
7938                         bp->link_params.req_duplex = DUPLEX_HALF;
7939                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7940                                                 ADVERTISED_TP);
7941                 } else {
7942                         BNX2X_ERR("NVRAM config error. "
7943                                   "Invalid link_config 0x%x"
7944                                   "  speed_cap_mask 0x%x\n",
7945                                   bp->port.link_config,
7946                                   bp->link_params.speed_cap_mask);
7947                         return;
7948                 }
7949                 break;
7950
7951         case PORT_FEATURE_LINK_SPEED_1G:
7952                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7953                         bp->link_params.req_line_speed = SPEED_1000;
7954                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7955                                                 ADVERTISED_TP);
7956                 } else {
7957                         BNX2X_ERR("NVRAM config error. "
7958                                   "Invalid link_config 0x%x"
7959                                   "  speed_cap_mask 0x%x\n",
7960                                   bp->port.link_config,
7961                                   bp->link_params.speed_cap_mask);
7962                         return;
7963                 }
7964                 break;
7965
7966         case PORT_FEATURE_LINK_SPEED_2_5G:
7967                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7968                         bp->link_params.req_line_speed = SPEED_2500;
7969                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7970                                                 ADVERTISED_TP);
7971                 } else {
7972                         BNX2X_ERR("NVRAM config error. "
7973                                   "Invalid link_config 0x%x"
7974                                   "  speed_cap_mask 0x%x\n",
7975                                   bp->port.link_config,
7976                                   bp->link_params.speed_cap_mask);
7977                         return;
7978                 }
7979                 break;
7980
7981         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7982         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7983         case PORT_FEATURE_LINK_SPEED_10G_KR:
7984                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7985                         bp->link_params.req_line_speed = SPEED_10000;
7986                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7987                                                 ADVERTISED_FIBRE);
7988                 } else {
7989                         BNX2X_ERR("NVRAM config error. "
7990                                   "Invalid link_config 0x%x"
7991                                   "  speed_cap_mask 0x%x\n",
7992                                   bp->port.link_config,
7993                                   bp->link_params.speed_cap_mask);
7994                         return;
7995                 }
7996                 break;
7997
7998         default:
7999                 BNX2X_ERR("NVRAM config error. "
8000                           "BAD link speed link_config 0x%x\n",
8001                           bp->port.link_config);
8002                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8003                 bp->port.advertising = bp->port.supported;
8004                 break;
8005         }
8006
8007         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8008                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8009         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8010             !(bp->port.supported & SUPPORTED_Autoneg))
8011                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8012
8013         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8014                        "  advertising 0x%x\n",
8015                        bp->link_params.req_line_speed,
8016                        bp->link_params.req_duplex,
8017                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8018 }
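
/*
 * Worked example for the flow-control fixup above: if NVRAM requested
 * BNX2X_FLOW_CTRL_AUTO but the port does not advertise
 * SUPPORTED_Autoneg (e.g. a forced-speed link), req_flow_ctrl is
 * downgraded to BNX2X_FLOW_CTRL_NONE, since pause autonegotiation
 * cannot happen without link autonegotiation.
 */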
8019
8020 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8021 {
8022         int port = BP_PORT(bp);
8023         u32 val, val2;
8024         u32 config;
8025         u16 i;
8026
8027         bp->link_params.bp = bp;
8028         bp->link_params.port = port;
8029
8030         bp->link_params.lane_config =
8031                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8032         bp->link_params.ext_phy_config =
8033                 SHMEM_RD(bp,
8034                          dev_info.port_hw_config[port].external_phy_config);
8035         bp->link_params.speed_cap_mask =
8036                 SHMEM_RD(bp,
8037                          dev_info.port_hw_config[port].speed_capability_mask);
8038
8039         bp->port.link_config =
8040                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8041
8042         /* Get the XGXS rx and tx config for all 4 lanes */
8043         for (i = 0; i < 2; i++) {
8044                 val = SHMEM_RD(bp,
8045                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8046                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8047                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8048
8049                 val = SHMEM_RD(bp,
8050                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8051                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8052                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8053         }
8054
8055         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8056         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8057                 bp->link_params.feature_config_flags |=
8058                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8059         else
8060                 bp->link_params.feature_config_flags &=
8061                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8062
8063         /* If the device is capable of WoL, set the default state according
8064          * to the HW
8065          */
8066         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8067                    (config & PORT_FEATURE_WOL_ENABLED));
8068
8069         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8070                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8071                        bp->link_params.lane_config,
8072                        bp->link_params.ext_phy_config,
8073                        bp->link_params.speed_cap_mask, bp->port.link_config);
8074
8075         bp->link_params.switch_cfg = (bp->port.link_config &
8076                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8077         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8078
8079         bnx2x_link_settings_requested(bp);
8080
8081         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8082         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8083         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8084         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8085         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8086         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8087         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8088         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8089         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8090         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8091 }
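
/*
 * Illustrative sketch of the mac_upper/mac_lower unpacking above: the
 * shmem keeps the 6-byte MAC in two 32-bit words.  With hypothetical
 * values mac_upper = 0x0010 and mac_lower = 0x18a1b2c3 the bytes land
 * as dev_addr[] = { 0x00, 0x10, 0x18, 0xa1, 0xb2, 0xc3 }, i.e. the
 * printable MAC 00:10:18:a1:b2:c3.
 */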
8092
8093 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8094 {
8095         int func = BP_FUNC(bp);
8096         u32 val, val2;
8097         int rc = 0;
8098
8099         bnx2x_get_common_hwinfo(bp);
8100
8101         bp->e1hov = 0;
8102         bp->e1hmf = 0;
8103         if (CHIP_IS_E1H(bp)) {
8104                 bp->mf_config =
8105                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8106
8107                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8108                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8109                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8110
8111                         bp->e1hov = val;
8112                         bp->e1hmf = 1;
8113                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8114                                        "(0x%04x)\n",
8115                                        func, bp->e1hov, bp->e1hov);
8116                 } else {
8117                         BNX2X_DEV_INFO("single function mode\n");
8118                         if (BP_E1HVN(bp)) {
8119                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8120                                           "  aborting\n", func);
8121                                 rc = -EPERM;
8122                         }
8123                 }
8124         }
8125
8126         if (!BP_NOMCP(bp)) {
8127                 bnx2x_get_port_hwinfo(bp);
8128
8129                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8130                               DRV_MSG_SEQ_NUMBER_MASK);
8131                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8132         }
8133
8134         if (IS_E1HMF(bp)) {
8135                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8136                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8137                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8138                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8139                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8140                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8141                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8142                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8143                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8144                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8145                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8146                                ETH_ALEN);
8147                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8148                                ETH_ALEN);
8149                 }
8150
8151                 return rc;
8152         }
8153
8154         if (BP_NOMCP(bp)) {
8155                 /* only supposed to happen on emulation/FPGA */
8156                 BNX2X_ERR("warning: random MAC workaround active\n");
8157                 random_ether_addr(bp->dev->dev_addr);
8158                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8159         }
8160
8161         return rc;
8162 }
8163
8164 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8165 {
8166         int func = BP_FUNC(bp);
8167         int timer_interval;
8168         int rc;
8169
8170         /* Disable interrupt handling until HW is initialized */
8171         atomic_set(&bp->intr_sem, 1);
8172
8173         mutex_init(&bp->port.phy_mutex);
8174
8175         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8176         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8177
8178         rc = bnx2x_get_hwinfo(bp);
8179
8180         /* need to reset the chip if UNDI was active */
8181         if (!BP_NOMCP(bp))
8182                 bnx2x_undi_unload(bp);
8183
8184         if (CHIP_REV_IS_FPGA(bp))
8185                 printk(KERN_ERR PFX "FPGA detected\n");
8186
8187         if (BP_NOMCP(bp) && (func == 0))
8188                 printk(KERN_ERR PFX
8189                        "MCP disabled, must load devices in order!\n");
8190
8191         /* Set multi queue mode */
8192         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8193             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8194                 printk(KERN_ERR PFX
8195                       "Multi queue disabled: requested int_mode is not MSI-X\n");
8196                 multi_mode = ETH_RSS_MODE_DISABLED;
8197         }
8198         bp->multi_mode = multi_mode;
8199
8201         /* Set TPA flags */
8202         if (disable_tpa) {
8203                 bp->flags &= ~TPA_ENABLE_FLAG;
8204                 bp->dev->features &= ~NETIF_F_LRO;
8205         } else {
8206                 bp->flags |= TPA_ENABLE_FLAG;
8207                 bp->dev->features |= NETIF_F_LRO;
8208         }
8209
8210         bp->mrrs = mrrs;
8211
8212         bp->tx_ring_size = MAX_TX_AVAIL;
8213         bp->rx_ring_size = MAX_RX_AVAIL;
8214
8215         bp->rx_csum = 1;
8216
8217         bp->tx_ticks = 50;
8218         bp->rx_ticks = 25;
8219
8220         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8221         bp->current_interval = (poll ? poll : timer_interval);
8222
8223         init_timer(&bp->timer);
8224         bp->timer.expires = jiffies + bp->current_interval;
8225         bp->timer.data = (unsigned long) bp;
8226         bp->timer.function = bnx2x_timer;
8227
8228         return rc;
8229 }
8230
8231 /*
8232  * ethtool service functions
8233  */
8234
8235 /* All ethtool functions called with rtnl_lock */
8236
8237 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8238 {
8239         struct bnx2x *bp = netdev_priv(dev);
8240
8241         cmd->supported = bp->port.supported;
8242         cmd->advertising = bp->port.advertising;
8243
8244         if (netif_carrier_ok(dev)) {
8245                 cmd->speed = bp->link_vars.line_speed;
8246                 cmd->duplex = bp->link_vars.duplex;
8247         } else {
8248                 cmd->speed = bp->link_params.req_line_speed;
8249                 cmd->duplex = bp->link_params.req_duplex;
8250         }
8251         if (IS_E1HMF(bp)) {
8252                 u16 vn_max_rate;
8253
8254                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8255                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8256                 if (vn_max_rate < cmd->speed)
8257                         cmd->speed = vn_max_rate;
8258         }
8259
8260         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8261                 u32 ext_phy_type =
8262                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8263
8264                 switch (ext_phy_type) {
8265                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8266                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8267                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8268                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8269                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8270                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8271                         cmd->port = PORT_FIBRE;
8272                         break;
8273
8274                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8275                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8276                         cmd->port = PORT_TP;
8277                         break;
8278
8279                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8280                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8281                                   bp->link_params.ext_phy_config);
8282                         break;
8283
8284                 default:
8285                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8286                            bp->link_params.ext_phy_config);
8287                         break;
8288                 }
8289         } else
8290                 cmd->port = PORT_TP;
8291
8292         cmd->phy_address = bp->port.phy_addr;
8293         cmd->transceiver = XCVR_INTERNAL;
8294
8295         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8296                 cmd->autoneg = AUTONEG_ENABLE;
8297         else
8298                 cmd->autoneg = AUTONEG_DISABLE;
8299
8300         cmd->maxtxpkt = 0;
8301         cmd->maxrxpkt = 0;
8302
8303         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8304            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8305            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8306            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8307            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8308            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8309            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8310
8311         return 0;
8312 }
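
/*
 * Note on the E1HMF clamp above: the FUNC_MF_CFG_MAX_BW field is in
 * units of 100 Mbps, so an illustrative field value of 25 yields
 * vn_max_rate = 2500 and the reported speed is capped at 2.5 Gbps
 * even when the physical link runs at 10 Gbps.
 */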
8313
8314 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8315 {
8316         struct bnx2x *bp = netdev_priv(dev);
8317         u32 advertising;
8318
8319         if (IS_E1HMF(bp))
8320                 return 0;
8321
8322         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8323            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8324            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8325            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8326            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8327            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8328            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8329
8330         if (cmd->autoneg == AUTONEG_ENABLE) {
8331                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8332                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8333                         return -EINVAL;
8334                 }
8335
8336                 /* advertise the requested speed and duplex if supported */
8337                 cmd->advertising &= bp->port.supported;
8338
8339                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8340                 bp->link_params.req_duplex = DUPLEX_FULL;
8341                 bp->port.advertising |= (ADVERTISED_Autoneg |
8342                                          cmd->advertising);
8343
8344         } else { /* forced speed */
8345                 /* advertise the requested speed and duplex if supported */
8346                 switch (cmd->speed) {
8347                 case SPEED_10:
8348                         if (cmd->duplex == DUPLEX_FULL) {
8349                                 if (!(bp->port.supported &
8350                                       SUPPORTED_10baseT_Full)) {
8351                                         DP(NETIF_MSG_LINK,
8352                                            "10M full not supported\n");
8353                                         return -EINVAL;
8354                                 }
8355
8356                                 advertising = (ADVERTISED_10baseT_Full |
8357                                                ADVERTISED_TP);
8358                         } else {
8359                                 if (!(bp->port.supported &
8360                                       SUPPORTED_10baseT_Half)) {
8361                                         DP(NETIF_MSG_LINK,
8362                                            "10M half not supported\n");
8363                                         return -EINVAL;
8364                                 }
8365
8366                                 advertising = (ADVERTISED_10baseT_Half |
8367                                                ADVERTISED_TP);
8368                         }
8369                         break;
8370
8371                 case SPEED_100:
8372                         if (cmd->duplex == DUPLEX_FULL) {
8373                                 if (!(bp->port.supported &
8374                                                 SUPPORTED_100baseT_Full)) {
8375                                         DP(NETIF_MSG_LINK,
8376                                            "100M full not supported\n");
8377                                         return -EINVAL;
8378                                 }
8379
8380                                 advertising = (ADVERTISED_100baseT_Full |
8381                                                ADVERTISED_TP);
8382                         } else {
8383                                 if (!(bp->port.supported &
8384                                                 SUPPORTED_100baseT_Half)) {
8385                                         DP(NETIF_MSG_LINK,
8386                                            "100M half not supported\n");
8387                                         return -EINVAL;
8388                                 }
8389
8390                                 advertising = (ADVERTISED_100baseT_Half |
8391                                                ADVERTISED_TP);
8392                         }
8393                         break;
8394
8395                 case SPEED_1000:
8396                         if (cmd->duplex != DUPLEX_FULL) {
8397                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8398                                 return -EINVAL;
8399                         }
8400
8401                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8402                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8403                                 return -EINVAL;
8404                         }
8405
8406                         advertising = (ADVERTISED_1000baseT_Full |
8407                                        ADVERTISED_TP);
8408                         break;
8409
8410                 case SPEED_2500:
8411                         if (cmd->duplex != DUPLEX_FULL) {
8412                                 DP(NETIF_MSG_LINK,
8413                                    "2.5G half not supported\n");
8414                                 return -EINVAL;
8415                         }
8416
8417                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8418                                 DP(NETIF_MSG_LINK,
8419                                    "2.5G full not supported\n");
8420                                 return -EINVAL;
8421                         }
8422
8423                         advertising = (ADVERTISED_2500baseX_Full |
8424                                        ADVERTISED_TP);
8425                         break;
8426
8427                 case SPEED_10000:
8428                         if (cmd->duplex != DUPLEX_FULL) {
8429                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8430                                 return -EINVAL;
8431                         }
8432
8433                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8434                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8435                                 return -EINVAL;
8436                         }
8437
8438                         advertising = (ADVERTISED_10000baseT_Full |
8439                                        ADVERTISED_FIBRE);
8440                         break;
8441
8442                 default:
8443                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8444                         return -EINVAL;
8445                 }
8446
8447                 bp->link_params.req_line_speed = cmd->speed;
8448                 bp->link_params.req_duplex = cmd->duplex;
8449                 bp->port.advertising = advertising;
8450         }
8451
8452         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8453            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8454            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8455            bp->port.advertising);
8456
8457         if (netif_running(dev)) {
8458                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8459                 bnx2x_link_set(bp);
8460         }
8461
8462         return 0;
8463 }
8464
8465 #define PHY_FW_VER_LEN                  10
8466
8467 static void bnx2x_get_drvinfo(struct net_device *dev,
8468                               struct ethtool_drvinfo *info)
8469 {
8470         struct bnx2x *bp = netdev_priv(dev);
8471         u8 phy_fw_ver[PHY_FW_VER_LEN];
8472
8473         strcpy(info->driver, DRV_MODULE_NAME);
8474         strcpy(info->version, DRV_MODULE_VERSION);
8475
8476         phy_fw_ver[0] = '\0';
8477         if (bp->port.pmf) {
8478                 bnx2x_acquire_phy_lock(bp);
8479                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8480                                              (bp->state != BNX2X_STATE_CLOSED),
8481                                              phy_fw_ver, PHY_FW_VER_LEN);
8482                 bnx2x_release_phy_lock(bp);
8483         }
8484
8485         snprintf(info->fw_version, sizeof(info->fw_version), "BC:%d.%d.%d%s%s",
8486                  (bp->common.bc_ver & 0xff0000) >> 16,
8487                  (bp->common.bc_ver & 0xff00) >> 8,
8488                  (bp->common.bc_ver & 0xff),
8489                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8490         strcpy(info->bus_info, pci_name(bp->pdev));
8491         info->n_stats = BNX2X_NUM_STATS;
8492         info->testinfo_len = BNX2X_NUM_TESTS;
8493         info->eedump_len = bp->common.flash_size;
8494         info->regdump_len = 0;
8495 }
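
/*
 * The bootcode version string above decodes one byte per field: an
 * illustrative bc_ver of 0x050207 is reported as "BC:5.2.7", with
 * " PHY:<ver>" appended when the PMF could read the external PHY
 * firmware version.
 */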
8496
8497 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8498 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8499
8500 static int bnx2x_get_regs_len(struct net_device *dev)
8501 {
8502         static u32 regdump_len;
8503         struct bnx2x *bp = netdev_priv(dev);
8504         int i;
8505
8506         if (regdump_len)
8507                 return regdump_len;
8508
8509         if (CHIP_IS_E1(bp)) {
8510                 for (i = 0; i < REGS_COUNT; i++)
8511                         if (IS_E1_ONLINE(reg_addrs[i].info))
8512                                 regdump_len += reg_addrs[i].size;
8513
8514                 for (i = 0; i < WREGS_COUNT_E1; i++)
8515                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8516                                 regdump_len += wreg_addrs_e1[i].size *
8517                                         (1 + wreg_addrs_e1[i].read_regs_count);
8518
8519         } else { /* E1H */
8520                 for (i = 0; i < REGS_COUNT; i++)
8521                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8522                                 regdump_len += reg_addrs[i].size;
8523
8524                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8525                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8526                                 regdump_len += wreg_addrs_e1h[i].size *
8527                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8528         }
8529         regdump_len *= 4;
8530         regdump_len += sizeof(struct dump_hdr);
8531
8532         return regdump_len;
8533 }
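
/*
 * Sizing sketch: the loops above count 32-bit registers, so the sum is
 * multiplied by 4 to convert words to bytes before the dump header is
 * added.  A wide-bus entry contributes size * (1 + read_regs_count)
 * words.  The result is cached in the static regdump_len since it
 * cannot change at runtime.
 */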
8534
8535 static void bnx2x_get_regs(struct net_device *dev,
8536                            struct ethtool_regs *regs, void *_p)
8537 {
8538         u32 *p = _p, i, j;
8539         struct bnx2x *bp = netdev_priv(dev);
8540         struct dump_hdr dump_hdr = {0};
8541
8542         regs->version = 0;
8543         memset(p, 0, regs->len);
8544
8545         if (!netif_running(bp->dev))
8546                 return;
8547
8548         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8549         dump_hdr.dump_sign = dump_sign_all;
8550         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8551         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8552         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8553         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8554         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8555
8556         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8557         p += dump_hdr.hdr_size + 1;
8558
8559         if (CHIP_IS_E1(bp)) {
8560                 for (i = 0; i < REGS_COUNT; i++)
8561                         if (IS_E1_ONLINE(reg_addrs[i].info))
8562                                 for (j = 0; j < reg_addrs[i].size; j++)
8563                                         *p++ = REG_RD(bp,
8564                                                       reg_addrs[i].addr + j*4);
8565
8566         } else { /* E1H */
8567                 for (i = 0; i < REGS_COUNT; i++)
8568                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8569                                 for (j = 0; j < reg_addrs[i].size; j++)
8570                                         *p++ = REG_RD(bp,
8571                                                       reg_addrs[i].addr + j*4);
8572         }
8573 }
8574
8575 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8576 {
8577         struct bnx2x *bp = netdev_priv(dev);
8578
8579         if (bp->flags & NO_WOL_FLAG) {
8580                 wol->supported = 0;
8581                 wol->wolopts = 0;
8582         } else {
8583                 wol->supported = WAKE_MAGIC;
8584                 if (bp->wol)
8585                         wol->wolopts = WAKE_MAGIC;
8586                 else
8587                         wol->wolopts = 0;
8588         }
8589         memset(&wol->sopass, 0, sizeof(wol->sopass));
8590 }
8591
8592 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8593 {
8594         struct bnx2x *bp = netdev_priv(dev);
8595
8596         if (wol->wolopts & ~WAKE_MAGIC)
8597                 return -EINVAL;
8598
8599         if (wol->wolopts & WAKE_MAGIC) {
8600                 if (bp->flags & NO_WOL_FLAG)
8601                         return -EINVAL;
8602
8603                 bp->wol = 1;
8604         } else
8605                 bp->wol = 0;
8606
8607         return 0;
8608 }
8609
8610 static u32 bnx2x_get_msglevel(struct net_device *dev)
8611 {
8612         struct bnx2x *bp = netdev_priv(dev);
8613
8614         return bp->msglevel;
8615 }
8616
8617 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8618 {
8619         struct bnx2x *bp = netdev_priv(dev);
8620
8621         if (capable(CAP_NET_ADMIN))
8622                 bp->msglevel = level;
8623 }
8624
8625 static int bnx2x_nway_reset(struct net_device *dev)
8626 {
8627         struct bnx2x *bp = netdev_priv(dev);
8628
8629         if (!bp->port.pmf)
8630                 return 0;
8631
8632         if (netif_running(dev)) {
8633                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8634                 bnx2x_link_set(bp);
8635         }
8636
8637         return 0;
8638 }
8639
8640 static int bnx2x_get_eeprom_len(struct net_device *dev)
8641 {
8642         struct bnx2x *bp = netdev_priv(dev);
8643
8644         return bp->common.flash_size;
8645 }
8646
8647 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8648 {
8649         int port = BP_PORT(bp);
8650         int count, i;
8651         u32 val = 0;
8652
8653         /* adjust timeout for emulation/FPGA */
8654         count = NVRAM_TIMEOUT_COUNT;
8655         if (CHIP_REV_IS_SLOW(bp))
8656                 count *= 100;
8657
8658         /* request access to nvram interface */
8659         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8660                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8661
8662         for (i = 0; i < count*10; i++) {
8663                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8664                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8665                         break;
8666
8667                 udelay(5);
8668         }
8669
8670         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8671                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8672                 return -EBUSY;
8673         }
8674
8675         return 0;
8676 }
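
/*
 * Timing sketch: the arbitration poll above runs count * 10 iterations
 * with a 5 us delay each, i.e. a worst case of count * 50 us before
 * failing with -EBUSY; on emulation/FPGA the count (and therefore the
 * timeout) is stretched 100-fold to match the slower clock.
 */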
8677
8678 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8679 {
8680         int port = BP_PORT(bp);
8681         int count, i;
8682         u32 val = 0;
8683
8684         /* adjust timeout for emulation/FPGA */
8685         count = NVRAM_TIMEOUT_COUNT;
8686         if (CHIP_REV_IS_SLOW(bp))
8687                 count *= 100;
8688
8689         /* relinquish nvram interface */
8690         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8691                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8692
8693         for (i = 0; i < count*10; i++) {
8694                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8695                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8696                         break;
8697
8698                 udelay(5);
8699         }
8700
8701         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8702                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8703                 return -EBUSY;
8704         }
8705
8706         return 0;
8707 }
8708
8709 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8710 {
8711         u32 val;
8712
8713         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8714
8715         /* enable both bits, even on read */
8716         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8717                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8718                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8719 }
8720
8721 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8722 {
8723         u32 val;
8724
8725         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8726
8727         /* disable both bits, even after read */
8728         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8729                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8730                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8731 }
8732
8733 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8734                                   u32 cmd_flags)
8735 {
8736         int count, i, rc;
8737         u32 val;
8738
8739         /* build the command word */
8740         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8741
8742         /* need to clear DONE bit separately */
8743         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8744
8745         /* address of the NVRAM to read from */
8746         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8747                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8748
8749         /* issue a read command */
8750         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8751
8752         /* adjust timeout for emulation/FPGA */
8753         count = NVRAM_TIMEOUT_COUNT;
8754         if (CHIP_REV_IS_SLOW(bp))
8755                 count *= 100;
8756
8757         /* wait for completion */
8758         *ret_val = 0;
8759         rc = -EBUSY;
8760         for (i = 0; i < count; i++) {
8761                 udelay(5);
8762                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8763
8764                 if (val & MCPR_NVM_COMMAND_DONE) {
8765                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8766                         /* we read nvram data in cpu order,
8767                          * but ethtool sees it as an array of bytes;
8768                          * converting to big-endian does the work */
8769                         *ret_val = cpu_to_be32(val);
8770                         rc = 0;
8771                         break;
8772                 }
8773         }
8774
8775         return rc;
8776 }
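
/*
 * Endianness example for the conversion above: if the NVRAM returns
 * 0x11223344 in CPU order, cpu_to_be32() stores it so the caller's
 * byte buffer reads 0x11 0x22 0x33 0x44, matching ethtool's view of
 * the EEPROM as a plain byte stream.
 */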
8777
8778 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8779                             int buf_size)
8780 {
8781         int rc;
8782         u32 cmd_flags;
8783         __be32 val;
8784
8785         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8786                 DP(BNX2X_MSG_NVM,
8787                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8788                    offset, buf_size);
8789                 return -EINVAL;
8790         }
8791
8792         if (offset + buf_size > bp->common.flash_size) {
8793                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8794                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8795                    offset, buf_size, bp->common.flash_size);
8796                 return -EINVAL;
8797         }
8798
8799         /* request access to nvram interface */
8800         rc = bnx2x_acquire_nvram_lock(bp);
8801         if (rc)
8802                 return rc;
8803
8804         /* enable access to nvram interface */
8805         bnx2x_enable_nvram_access(bp);
8806
8807         /* read the first word(s) */
8808         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8809         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8810                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8811                 memcpy(ret_buf, &val, 4);
8812
8813                 /* advance to the next dword */
8814                 offset += sizeof(u32);
8815                 ret_buf += sizeof(u32);
8816                 buf_size -= sizeof(u32);
8817                 cmd_flags = 0;
8818         }
8819
8820         if (rc == 0) {
8821                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8822                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8823                 memcpy(ret_buf, &val, 4);
8824         }
8825
8826         /* disable access to nvram interface */
8827         bnx2x_disable_nvram_access(bp);
8828         bnx2x_release_nvram_lock(bp);
8829
8830         return rc;
8831 }
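
/*
 * Command-flag sketch: a 12-byte read issues three dword accesses
 * flagged FIRST, 0 and LAST respectively, while a 4-byte read skips
 * the loop entirely and goes out as a single FIRST | LAST access.
 */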
8832
8833 static int bnx2x_get_eeprom(struct net_device *dev,
8834                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8835 {
8836         struct bnx2x *bp = netdev_priv(dev);
8837         int rc;
8838
8839         if (!netif_running(dev))
8840                 return -EAGAIN;
8841
8842         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8843            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8844            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8845            eeprom->len, eeprom->len);
8846
8847         /* parameters already validated in ethtool_get_eeprom */
8848
8849         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8850
8851         return rc;
8852 }
8853
8854 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8855                                    u32 cmd_flags)
8856 {
8857         int count, i, rc;
8858
8859         /* build the command word */
8860         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8861
8862         /* need to clear DONE bit separately */
8863         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8864
8865         /* write the data */
8866         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8867
8868         /* address of the NVRAM to write to */
8869         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8870                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8871
8872         /* issue the write command */
8873         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8874
8875         /* adjust timeout for emulation/FPGA */
8876         count = NVRAM_TIMEOUT_COUNT;
8877         if (CHIP_REV_IS_SLOW(bp))
8878                 count *= 100;
8879
8880         /* wait for completion */
8881         rc = -EBUSY;
8882         for (i = 0; i < count; i++) {
8883                 udelay(5);
8884                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8885                 if (val & MCPR_NVM_COMMAND_DONE) {
8886                         rc = 0;
8887                         break;
8888                 }
8889         }
8890
8891         return rc;
8892 }
8893
8894 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8895
8896 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8897                               int buf_size)
8898 {
8899         int rc;
8900         u32 cmd_flags;
8901         u32 align_offset;
8902         __be32 val;
8903
8904         if (offset + buf_size > bp->common.flash_size) {
8905                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8906                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8907                    offset, buf_size, bp->common.flash_size);
8908                 return -EINVAL;
8909         }
8910
8911         /* request access to nvram interface */
8912         rc = bnx2x_acquire_nvram_lock(bp);
8913         if (rc)
8914                 return rc;
8915
8916         /* enable access to nvram interface */
8917         bnx2x_enable_nvram_access(bp);
8918
8919         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8920         align_offset = (offset & ~0x03);
8921         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8922
8923         if (rc == 0) {
8924                 val &= ~(0xff << BYTE_OFFSET(offset));
8925                 val |= (*data_buf << BYTE_OFFSET(offset));
8926
8927                 /* nvram data is returned as an array of bytes;
8928                  * convert it back to cpu order */
8929                 val = be32_to_cpu(val);
8930
8931                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8932                                              cmd_flags);
8933         }
8934
8935         /* disable access to nvram interface */
8936         bnx2x_disable_nvram_access(bp);
8937         bnx2x_release_nvram_lock(bp);
8938
8939         return rc;
8940 }
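
/*
 * Byte-splice example for the read-modify-write above: writing one
 * byte at offset 0x103 gives align_offset = 0x100 and
 * BYTE_OFFSET(0x103) = 24, so the dword at 0x100 is read, bits 31:24
 * are cleared and replaced with the new byte, and the whole dword is
 * written back as a single FIRST | LAST access.
 */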
8941
8942 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8943                              int buf_size)
8944 {
8945         int rc;
8946         u32 cmd_flags;
8947         u32 val;
8948         u32 written_so_far;
8949
8950         if (buf_size == 1)      /* single-byte write from ethtool */
8951                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8952
8953         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8954                 DP(BNX2X_MSG_NVM,
8955                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8956                    offset, buf_size);
8957                 return -EINVAL;
8958         }
8959
8960         if (offset + buf_size > bp->common.flash_size) {
8961                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8962                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8963                    offset, buf_size, bp->common.flash_size);
8964                 return -EINVAL;
8965         }
8966
8967         /* request access to nvram interface */
8968         rc = bnx2x_acquire_nvram_lock(bp);
8969         if (rc)
8970                 return rc;
8971
8972         /* enable access to nvram interface */
8973         bnx2x_enable_nvram_access(bp);
8974
8975         written_so_far = 0;
8976         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8977         while ((written_so_far < buf_size) && (rc == 0)) {
8978                 if (written_so_far == (buf_size - sizeof(u32)))
8979                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8980                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8981                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8982                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8983                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8984
8985                 memcpy(&val, data_buf, 4);
8986
8987                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8988
8989                 /* advance to the next dword */
8990                 offset += sizeof(u32);
8991                 data_buf += sizeof(u32);
8992                 written_so_far += sizeof(u32);
8993                 cmd_flags = 0;
8994         }
8995
8996         /* disable access to nvram interface */
8997         bnx2x_disable_nvram_access(bp);
8998         bnx2x_release_nvram_lock(bp);
8999
9000         return rc;
9001 }
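
/*
 * Page-boundary sketch: the loop above marks a dword LAST when it is
 * either the final dword of the buffer or the last one before a
 * NVRAM_PAGE_SIZE boundary, and re-arms FIRST on the dword that opens
 * the next page, so a write spanning pages is split into per-page
 * FIRST ... LAST bursts.
 */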
9002
9003 static int bnx2x_set_eeprom(struct net_device *dev,
9004                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9005 {
9006         struct bnx2x *bp = netdev_priv(dev);
9007         int rc;
9008
9009         if (!netif_running(dev))
9010                 return -EAGAIN;
9011
9012         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9013            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9014            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9015            eeprom->len, eeprom->len);
9016
9017         /* parameters already validated in ethtool_set_eeprom */
9018
9019         /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
9020         if (eeprom->magic == 0x00504859)
9021                 if (bp->port.pmf) {
9022
9023                         bnx2x_acquire_phy_lock(bp);
9024                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
9025                                              bp->link_params.ext_phy_config,
9026                                              (bp->state != BNX2X_STATE_CLOSED),
9027                                              eebuf, eeprom->len);
9028                         if ((bp->state == BNX2X_STATE_OPEN) ||
9029                             (bp->state == BNX2X_STATE_DISABLED)) {
9030                                 rc |= bnx2x_link_reset(&bp->link_params,
9031                                                        &bp->link_vars, 1);
9032                                 rc |= bnx2x_phy_init(&bp->link_params,
9033                                                      &bp->link_vars);
9034                         }
9035                         bnx2x_release_phy_lock(bp);
9036
9037                 } else /* Only the PMF can access the PHY */
9038                         return -EINVAL;
9039         else
9040                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9041
9042         return rc;
9043 }
9044
9045 static int bnx2x_get_coalesce(struct net_device *dev,
9046                               struct ethtool_coalesce *coal)
9047 {
9048         struct bnx2x *bp = netdev_priv(dev);
9049
9050         memset(coal, 0, sizeof(struct ethtool_coalesce));
9051
9052         coal->rx_coalesce_usecs = bp->rx_ticks;
9053         coal->tx_coalesce_usecs = bp->tx_ticks;
9054
9055         return 0;
9056 }
9057
9058 static int bnx2x_set_coalesce(struct net_device *dev,
9059                               struct ethtool_coalesce *coal)
9060 {
9061         struct bnx2x *bp = netdev_priv(dev);
9062
9063         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9064         if (bp->rx_ticks > 3000)
9065                 bp->rx_ticks = 3000;
9066
9067         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9068         if (bp->tx_ticks > 0x3000)
9069                 bp->tx_ticks = 0x3000;
9070
9071         if (netif_running(dev))
9072                 bnx2x_update_coalesce(bp);
9073
9074         return 0;
9075 }
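
/*
 * Clamp note: the bounds above are asymmetric as written; rx_ticks is
 * limited to decimal 3000 while tx_ticks is limited to 0x3000 (12288).
 * Both values are in microseconds and are truncated to u16 before the
 * comparison.
 */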
9076
9077 static void bnx2x_get_ringparam(struct net_device *dev,
9078                                 struct ethtool_ringparam *ering)
9079 {
9080         struct bnx2x *bp = netdev_priv(dev);
9081
9082         ering->rx_max_pending = MAX_RX_AVAIL;
9083         ering->rx_mini_max_pending = 0;
9084         ering->rx_jumbo_max_pending = 0;
9085
9086         ering->rx_pending = bp->rx_ring_size;
9087         ering->rx_mini_pending = 0;
9088         ering->rx_jumbo_pending = 0;
9089
9090         ering->tx_max_pending = MAX_TX_AVAIL;
9091         ering->tx_pending = bp->tx_ring_size;
9092 }
9093
9094 static int bnx2x_set_ringparam(struct net_device *dev,
9095                                struct ethtool_ringparam *ering)
9096 {
9097         struct bnx2x *bp = netdev_priv(dev);
9098         int rc = 0;
9099
9100         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9101             (ering->tx_pending > MAX_TX_AVAIL) ||
9102             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9103                 return -EINVAL;
9104
9105         bp->rx_ring_size = ering->rx_pending;
9106         bp->tx_ring_size = ering->tx_pending;
9107
9108         if (netif_running(dev)) {
9109                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9110                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9111         }
9112
9113         return rc;
9114 }
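
/*
 * The tx lower bound above keeps the ring large enough for at least
 * one worst-case packet, which may consume MAX_SKB_FRAGS fragment BDs
 * plus a few extra descriptors, hence the MAX_SKB_FRAGS + 4 floor.
 */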
9115
9116 static void bnx2x_get_pauseparam(struct net_device *dev,
9117                                  struct ethtool_pauseparam *epause)
9118 {
9119         struct bnx2x *bp = netdev_priv(dev);
9120
9121         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9122                            BNX2X_FLOW_CTRL_AUTO) &&
9123                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9124
9125         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9126                             BNX2X_FLOW_CTRL_RX);
9127         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9128                             BNX2X_FLOW_CTRL_TX);
9129
9130         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9131            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9132            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9133 }
9134
9135 static int bnx2x_set_pauseparam(struct net_device *dev,
9136                                 struct ethtool_pauseparam *epause)
9137 {
9138         struct bnx2x *bp = netdev_priv(dev);
9139
9140         if (IS_E1HMF(bp))
9141                 return 0;
9142
9143         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9144            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9145            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9146
9147         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9148
9149         if (epause->rx_pause)
9150                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9151
9152         if (epause->tx_pause)
9153                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9154
9155         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9156                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9157
9158         if (epause->autoneg) {
9159                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9160                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9161                         return -EINVAL;
9162                 }
9163
9164                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9165                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9166         }
9167
9168         DP(NETIF_MSG_LINK,
9169            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9170
9171         if (netif_running(dev)) {
9172                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9173                 bnx2x_link_set(bp);
9174         }
9175
9176         return 0;
9177 }
9178
9179 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9180 {
9181         struct bnx2x *bp = netdev_priv(dev);
9182         int changed = 0;
9183         int rc = 0;
9184
9185         /* TPA requires Rx CSUM offloading */
9186         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9187                 if (!(dev->features & NETIF_F_LRO)) {
9188                         dev->features |= NETIF_F_LRO;
9189                         bp->flags |= TPA_ENABLE_FLAG;
9190                         changed = 1;
9191                 }
9192
9193         } else if (dev->features & NETIF_F_LRO) {
9194                 dev->features &= ~NETIF_F_LRO;
9195                 bp->flags &= ~TPA_ENABLE_FLAG;
9196                 changed = 1;
9197         }
9198
9199         if (changed && netif_running(dev)) {
9200                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9201                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9202         }
9203
9204         return rc;
9205 }
9206
9207 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9208 {
9209         struct bnx2x *bp = netdev_priv(dev);
9210
9211         return bp->rx_csum;
9212 }
9213
9214 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9215 {
9216         struct bnx2x *bp = netdev_priv(dev);
9217         int rc = 0;
9218
9219         bp->rx_csum = data;
9220
9221         /* Disable TPA when Rx CSUM is disabled; otherwise all
9222            TPA'ed packets would be discarded due to a wrong TCP CSUM */
9223         if (!data) {
9224                 u32 flags = ethtool_op_get_flags(dev);
9225
9226                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9227         }
9228
9229         return rc;
9230 }
9231
9232 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9233 {
9234         if (data) {
9235                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9236                 dev->features |= NETIF_F_TSO6;
9237         } else {
9238                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9239                 dev->features &= ~NETIF_F_TSO6;
9240         }
9241
9242         return 0;
9243 }
9244
9245 static const struct {
9246         char string[ETH_GSTRING_LEN];
9247 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9248         { "register_test (offline)" },
9249         { "memory_test (offline)" },
9250         { "loopback_test (offline)" },
9251         { "nvram_test (online)" },
9252         { "interrupt_test (online)" },
9253         { "link_test (online)" },
9254         { "idle check (online)" }
9255 };
9256
9257 static int bnx2x_self_test_count(struct net_device *dev)
9258 {
9259         return BNX2X_NUM_TESTS;
9260 }
9261
9262 static int bnx2x_test_registers(struct bnx2x *bp)
9263 {
9264         int idx, i, rc = -ENODEV;
9265         u32 wr_val = 0;
9266         int port = BP_PORT(bp);
9267         static const struct {
9268                 u32  offset0;
9269                 u32  offset1;
9270                 u32  mask;
9271         } reg_tbl[] = {
9272 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9273                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9274                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9275                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9276                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9277                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9278                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9279                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9280                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9281                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9282 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9283                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9284                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9285                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9286                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9287                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9288                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9289                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9290                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9291                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9292 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9293                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9294                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9295                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9296                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9297                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9298                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9299                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9300                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9301                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9302 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9303                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9304                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9305                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9306                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9307                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9308                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9309                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9310
9311                 { 0xffffffff, 0, 0x00000000 }
9312         };
9313
9314         if (!netif_running(bp->dev))
9315                 return rc;
9316
9317         /* Run the test twice:
9318            first writing 0x00000000, then writing 0xffffffff */
9319         for (idx = 0; idx < 2; idx++) {
9320
9321                 switch (idx) {
9322                 case 0:
9323                         wr_val = 0;
9324                         break;
9325                 case 1:
9326                         wr_val = 0xffffffff;
9327                         break;
9328                 }
9329
9330                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9331                         u32 offset, mask, save_val, val;
9332
9333                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9334                         mask = reg_tbl[i].mask;
9335
9336                         save_val = REG_RD(bp, offset);
9337
9338                         REG_WR(bp, offset, wr_val);
9339                         val = REG_RD(bp, offset);
9340
9341                         /* Restore the original register's value */
9342                         REG_WR(bp, offset, save_val);
9343
9344                         /* verify the value is as expected */
9345                         if ((val & mask) != (wr_val & mask))
9346                                 goto test_reg_exit;
9347                 }
9348         }
9349
9350         rc = 0;
9351
9352 test_reg_exit:
9353         return rc;
9354 }
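
/*
 * Addressing sketch for the table above: offset1 is the per-port
 * stride, so port 1 exercises offset0 + offset1.  Each register is
 * written with all-zeros and then all-ones, read back and compared
 * under the per-register mask of implemented bits, with the original
 * value restored after each write.
 */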
9355
9356 static int bnx2x_test_memory(struct bnx2x *bp)
9357 {
9358         int i, j, rc = -ENODEV;
9359         u32 val;
9360         static const struct {
9361                 u32 offset;
9362                 int size;
9363         } mem_tbl[] = {
9364                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9365                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9366                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9367                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9368                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9369                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9370                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9371
9372                 { 0xffffffff, 0 }
9373         };
9374         static const struct {
9375                 char *name;
9376                 u32 offset;
9377                 u32 e1_mask;
9378                 u32 e1h_mask;
9379         } prty_tbl[] = {
9380                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9381                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9382                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9383                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9384                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9385                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9386
9387                 { NULL, 0xffffffff, 0, 0 }
9388         };
9389
9390         if (!netif_running(bp->dev))
9391                 return rc;
9392
9393         /* Go through all the memories */
9394         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9395                 for (j = 0; j < mem_tbl[i].size; j++)
9396                         REG_RD(bp, mem_tbl[i].offset + j*4);
9397
9398         /* Check the parity status */
9399         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9400                 val = REG_RD(bp, prty_tbl[i].offset);
9401                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9402                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9403                         DP(NETIF_MSG_HW,
9404                            "%s is 0x%x\n", prty_tbl[i].name, val);
9405                         goto test_mem_exit;
9406                 }
9407         }
9408
9409         rc = 0;
9410
9411 test_mem_exit:
9412         return rc;
9413 }
9414
9415 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9416 {
9417         int cnt = 1000;
9418
9419         if (link_up)
9420                 while (bnx2x_link_test(bp) && cnt--)
9421                         msleep(10);
9422 }
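
/*
 * The wait above polls bnx2x_link_test() every 10 ms for up to 1000
 * iterations, i.e. roughly a 10 second ceiling for the link to come
 * up before the self-test continues.
 */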
9423
9424 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9425 {
9426         unsigned int pkt_size, num_pkts, i;
9427         struct sk_buff *skb;
9428         unsigned char *packet;
9429         struct bnx2x_fastpath *fp = &bp->fp[0];
9430         u16 tx_start_idx, tx_idx;
9431         u16 rx_start_idx, rx_idx;
9432         u16 pkt_prod;
9433         struct sw_tx_bd *tx_buf;
9434         struct eth_tx_bd *tx_bd;
9435         dma_addr_t mapping;
9436         union eth_rx_cqe *cqe;
9437         u8 cqe_fp_flags;
9438         struct sw_rx_bd *rx_buf;
9439         u16 len;
9440         int rc = -ENODEV;
9441
9442         /* check the loopback mode */
9443         switch (loopback_mode) {
9444         case BNX2X_PHY_LOOPBACK:
9445                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9446                         return -EINVAL;
9447                 break;
9448         case BNX2X_MAC_LOOPBACK:
9449                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9450                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9451                 break;
9452         default:
9453                 return -EINVAL;
9454         }
9455
9456         /* prepare the loopback packet */
9457         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9458                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9459         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9460         if (!skb) {
9461                 rc = -ENOMEM;
9462                 goto test_loopback_exit;
9463         }
9464         packet = skb_put(skb, pkt_size);
9465         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9466         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9467         for (i = ETH_HLEN; i < pkt_size; i++)
9468                 packet[i] = (unsigned char) (i & 0xff);
9469
9470         /* send the loopback packet */
9471         num_pkts = 0;
9472         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9473         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9474
9475         pkt_prod = fp->tx_pkt_prod++;
9476         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9477         tx_buf->first_bd = fp->tx_bd_prod;
9478         tx_buf->skb = skb;
9479
9480         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9481         mapping = pci_map_single(bp->pdev, skb->data,
9482                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9483         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9484         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9485         tx_bd->nbd = cpu_to_le16(1);
9486         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9487         tx_bd->vlan = cpu_to_le16(pkt_prod);
9488         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9489                                        ETH_TX_BD_FLAGS_END_BD);
9490         tx_bd->general_data = ((UNICAST_ADDRESS <<
9491                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9492
9493         wmb();
9494
9495         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9496         mb(); /* FW restriction: must not reorder writing nbd and packets */
9497         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9498         DOORBELL(bp, fp->index, 0);
9499
9500         mmiowb();
9501
9502         num_pkts++;
9503         fp->tx_bd_prod++;
9504         bp->dev->trans_start = jiffies;
9505
9506         udelay(100);
9507
9508         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9509         if (tx_idx != tx_start_idx + num_pkts)
9510                 goto test_loopback_exit;
9511
9512         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9513         if (rx_idx != rx_start_idx + num_pkts)
9514                 goto test_loopback_exit;
9515
9516         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9517         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
             /* ETH_RX_ERROR_FALGS [sic] - spelled this way in the FW header */
9518         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9519                 goto test_loopback_rx_exit;
9520
9521         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9522         if (len != pkt_size)
9523                 goto test_loopback_rx_exit;
9524
9525         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9526         skb = rx_buf->skb;
9527         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9528         for (i = ETH_HLEN; i < pkt_size; i++)
9529                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9530                         goto test_loopback_rx_exit;
9531
9532         rc = 0;
9533
9534 test_loopback_rx_exit:
9535
9536         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9537         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9538         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9539         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9540
9541         /* Update producers */
9542         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9543                              fp->rx_sge_prod);
9544
9545 test_loopback_exit:
9546         bp->link_params.loopback_mode = LOOPBACK_NONE;
9547
9548         return rc;
9549 }
9550
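/* Run both loopback flavours (PHY, then MAC) with the fastpath stopped
 * and the PHY lock held; the return value is a bitmask of
 * *_LOOPBACK_FAILED flags rather than an errno.
 */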
9551 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9552 {
9553         int rc = 0, res;
9554
9555         if (!netif_running(bp->dev))
9556                 return BNX2X_LOOPBACK_FAILED;
9557
9558         bnx2x_netif_stop(bp, 1);
9559         bnx2x_acquire_phy_lock(bp);
9560
9561         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9562         if (res) {
9563                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9564                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9565         }
9566
9567         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9568         if (res) {
9569                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9570                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9571         }
9572
9573         bnx2x_release_phy_lock(bp);
9574         bnx2x_netif_start(bp);
9575
9576         return rc;
9577 }
9578
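/* Each NVRAM section ends with its own little-endian CRC32, so running
 * the CRC over the whole section (data plus stored checksum) of any
 * intact section yields the standard CRC-32 residue below; no
 * per-section expected value is needed.
 */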
9579 #define CRC32_RESIDUAL                  0xdebb20e3
9580
9581 static int bnx2x_test_nvram(struct bnx2x *bp)
9582 {
9583         static const struct {
9584                 int offset;
9585                 int size;
9586         } nvram_tbl[] = {
9587                 {     0,  0x14 }, /* bootstrap */
9588                 {  0x14,  0xec }, /* dir */
9589                 { 0x100, 0x350 }, /* manuf_info */
9590                 { 0x450,  0xf0 }, /* feature_info */
9591                 { 0x640,  0x64 }, /* upgrade_key_info */
9592                 { 0x6a4,  0x64 },
9593                 { 0x708,  0x70 }, /* manuf_key_info */
9594                 { 0x778,  0x70 },
9595                 {     0,     0 }
9596         };
9597         __be32 buf[0x350 / 4];
9598         u8 *data = (u8 *)buf;
9599         int i, rc;
9600         u32 magic, csum;
9601
9602         rc = bnx2x_nvram_read(bp, 0, data, 4);
9603         if (rc) {
9604                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9605                 goto test_nvram_exit;
9606         }
9607
9608         magic = be32_to_cpu(buf[0]);
9609         if (magic != 0x669955aa) {
9610                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9611                 rc = -ENODEV;
9612                 goto test_nvram_exit;
9613         }
9614
9615         for (i = 0; nvram_tbl[i].size; i++) {
9616
9617                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9618                                       nvram_tbl[i].size);
9619                 if (rc) {
9620                         DP(NETIF_MSG_PROBE,
9621                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9622                         goto test_nvram_exit;
9623                 }
9624
9625                 csum = ether_crc_le(nvram_tbl[i].size, data);
9626                 if (csum != CRC32_RESIDUAL) {
9627                         DP(NETIF_MSG_PROBE,
9628                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9629                         rc = -ENODEV;
9630                         goto test_nvram_exit;
9631                 }
9632         }
9633
9634 test_nvram_exit:
9635         return rc;
9636 }
9637
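/* Interrupt self-test: post a benign SET_MAC ramrod (header length 0)
 * on the slowpath and wait up to ~100ms (10 x 10ms) for its completion
 * to clear set_mac_pending; if it never completes, interrupt delivery
 * is broken and -ENODEV is returned.
 */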
9638 static int bnx2x_test_intr(struct bnx2x *bp)
9639 {
9640         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9641         int i, rc;
9642
9643         if (!netif_running(bp->dev))
9644                 return -ENODEV;
9645
9646         config->hdr.length = 0;
9647         if (CHIP_IS_E1(bp))
9648                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9649         else
9650                 config->hdr.offset = BP_FUNC(bp);
9651         config->hdr.client_id = bp->fp->cl_id;
9652         config->hdr.reserved1 = 0;
9653
9654         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9655                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9656                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9657         if (rc == 0) {
9658                 bp->set_mac_pending++;
9659                 for (i = 0; i < 10; i++) {
9660                         if (!bp->set_mac_pending)
9661                                 break;
9662                         msleep_interruptible(10);
9663                 }
9664                 if (i == 10)
9665                         rc = -ENODEV;
9666         }
9667
9668         return rc;
9669 }
9670
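/* ethtool self-test entry point.  Result slots: buf[0] registers,
 * buf[1] memory, buf[2] loopback, buf[3] NVRAM, buf[4] interrupt,
 * buf[5] link (PMF only).  Offline tests reload the NIC in diagnostic
 * mode and are skipped altogether in E1H multi-function mode.
 */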
9671 static void bnx2x_self_test(struct net_device *dev,
9672                             struct ethtool_test *etest, u64 *buf)
9673 {
9674         struct bnx2x *bp = netdev_priv(dev);
9675
9676         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9677
9678         if (!netif_running(dev))
9679                 return;
9680
9681         /* offline tests are not supported in MF mode */
9682         if (IS_E1HMF(bp))
9683                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9684
9685         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9686                 u8 link_up;
9687
9688                 link_up = bp->link_vars.link_up;
9689                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9690                 bnx2x_nic_load(bp, LOAD_DIAG);
9691                 /* wait until link state is restored */
9692                 bnx2x_wait_for_link(bp, link_up);
9693
9694                 if (bnx2x_test_registers(bp) != 0) {
9695                         buf[0] = 1;
9696                         etest->flags |= ETH_TEST_FL_FAILED;
9697                 }
9698                 if (bnx2x_test_memory(bp) != 0) {
9699                         buf[1] = 1;
9700                         etest->flags |= ETH_TEST_FL_FAILED;
9701                 }
9702                 buf[2] = bnx2x_test_loopback(bp, link_up);
9703                 if (buf[2] != 0)
9704                         etest->flags |= ETH_TEST_FL_FAILED;
9705
9706                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9707                 bnx2x_nic_load(bp, LOAD_NORMAL);
9708                 /* wait until link state is restored */
9709                 bnx2x_wait_for_link(bp, link_up);
9710         }
9711         if (bnx2x_test_nvram(bp) != 0) {
9712                 buf[3] = 1;
9713                 etest->flags |= ETH_TEST_FL_FAILED;
9714         }
9715         if (bnx2x_test_intr(bp) != 0) {
9716                 buf[4] = 1;
9717                 etest->flags |= ETH_TEST_FL_FAILED;
9718         }
9719         if (bp->port.pmf)
9720                 if (bnx2x_link_test(bp) != 0) {
9721                         buf[5] = 1;
9722                         etest->flags |= ETH_TEST_FL_FAILED;
9723                 }
9724
9725 #ifdef BNX2X_EXTRA_DEBUG
9726         bnx2x_panic_dump(bp);
9727 #endif
9728 }
9729
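/* Statistics tables: 'offset' is the 32-bit word offset into the stats
 * struct and 'size' is 4 or 8 bytes (0 means "skip this counter");
 * 64-bit counters are stored as a hi/lo pair of 32-bit words.  The
 * "[%d]" in each per-queue string is filled in with the queue number
 * by bnx2x_get_strings().
 */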
9730 static const struct {
9731         long offset;
9732         int size;
9733         u8 string[ETH_GSTRING_LEN];
9734 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9735 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9736         { Q_STATS_OFFSET32(error_bytes_received_hi),
9737                                                 8, "[%d]: rx_error_bytes" },
9738         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9739                                                 8, "[%d]: rx_ucast_packets" },
9740         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9741                                                 8, "[%d]: rx_mcast_packets" },
9742         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9743                                                 8, "[%d]: rx_bcast_packets" },
9744         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9745         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9746                                          4, "[%d]: rx_phy_ip_err_discards"},
9747         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9748                                          4, "[%d]: rx_skb_alloc_discard" },
9749         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9750
9751 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9752         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9753                                                         8, "[%d]: tx_packets" }
9754 };
9755
9756 static const struct {
9757         long offset;
9758         int size;
9759         u32 flags;
9760 #define STATS_FLAGS_PORT                1
9761 #define STATS_FLAGS_FUNC                2
9762 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9763         u8 string[ETH_GSTRING_LEN];
9764 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9765 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9766                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9767         { STATS_OFFSET32(error_bytes_received_hi),
9768                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9769         { STATS_OFFSET32(total_unicast_packets_received_hi),
9770                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9771         { STATS_OFFSET32(total_multicast_packets_received_hi),
9772                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9773         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9774                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9775         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9776                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9777         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9778                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9779         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9780                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9781         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9782                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9783 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9784                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9785         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9786                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9787         { STATS_OFFSET32(no_buff_discard_hi),
9788                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9789         { STATS_OFFSET32(mac_filter_discard),
9790                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9791         { STATS_OFFSET32(xxoverflow_discard),
9792                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9793         { STATS_OFFSET32(brb_drop_hi),
9794                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9795         { STATS_OFFSET32(brb_truncate_hi),
9796                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9797         { STATS_OFFSET32(pause_frames_received_hi),
9798                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9799         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9800                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9801         { STATS_OFFSET32(nig_timer_max),
9802                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9803 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9804                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9805         { STATS_OFFSET32(rx_skb_alloc_failed),
9806                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9807         { STATS_OFFSET32(hw_csum_err),
9808                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9809
9810         { STATS_OFFSET32(total_bytes_transmitted_hi),
9811                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9812         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9813                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9814         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9815                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9816         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9817                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9818         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9819                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9820         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9821                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9822         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9823                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9824 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9825                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9826         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9827                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9828         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9829                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9830         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9831                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9832         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9833                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9834         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9835                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9836         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9837                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9838         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9839                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9840         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9841                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9842         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9843                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9844 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9845                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9846         { STATS_OFFSET32(pause_frames_sent_hi),
9847                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9848 };
9849
9850 #define IS_PORT_STAT(i) \
9851         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9852 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9853 #define IS_E1HMF_MODE_STAT(bp) \
9854                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9855
9856 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9857 {
9858         struct bnx2x *bp = netdev_priv(dev);
9859         int i, j, k;
9860
9861         switch (stringset) {
9862         case ETH_SS_STATS:
9863                 if (is_multi(bp)) {
9864                         k = 0;
9865                         for_each_queue(bp, i) {
9866                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9867                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9868                                                 bnx2x_q_stats_arr[j].string, i);
9869                                 k += BNX2X_NUM_Q_STATS;
9870                         }
9871                         if (IS_E1HMF_MODE_STAT(bp))
9872                                 break;
9873                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9874                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9875                                        bnx2x_stats_arr[j].string);
9876                 } else {
9877                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9878                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9879                                         continue;
9880                                 strcpy(buf + j*ETH_GSTRING_LEN,
9881                                        bnx2x_stats_arr[i].string);
9882                                 j++;
9883                         }
9884                 }
9885                 break;
9886
9887         case ETH_SS_TEST:
9888                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9889                 break;
9890         }
9891 }
9892
9893 static int bnx2x_get_stats_count(struct net_device *dev)
9894 {
9895         struct bnx2x *bp = netdev_priv(dev);
9896         int i, num_stats;
9897
9898         if (is_multi(bp)) {
9899                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9900                 if (!IS_E1HMF_MODE_STAT(bp))
9901                         num_stats += BNX2X_NUM_STATS;
9902         } else {
9903                 if (IS_E1HMF_MODE_STAT(bp)) {
9904                         num_stats = 0;
9905                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9906                                 if (IS_FUNC_STAT(i))
9907                                         num_stats++;
9908                 } else
9909                         num_stats = BNX2X_NUM_STATS;
9910         }
9911
9912         return num_stats;
9913 }
9914
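/* Copy the raw stats words into the u64 array handed to ethtool:
 * per-queue counters first (when multi-queue), then the global ones,
 * assembling 8-byte counters from their hi/lo halves with HILO_U64 and
 * skipping port-only counters in E1H multi-function mode.
 */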
9915 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9916                                     struct ethtool_stats *stats, u64 *buf)
9917 {
9918         struct bnx2x *bp = netdev_priv(dev);
9919         u32 *hw_stats, *offset;
9920         int i, j, k;
9921
9922         if (is_multi(bp)) {
9923                 k = 0;
9924                 for_each_queue(bp, i) {
9925                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9926                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9927                                 if (bnx2x_q_stats_arr[j].size == 0) {
9928                                         /* skip this counter */
9929                                         buf[k + j] = 0;
9930                                         continue;
9931                                 }
9932                                 offset = (hw_stats +
9933                                           bnx2x_q_stats_arr[j].offset);
9934                                 if (bnx2x_q_stats_arr[j].size == 4) {
9935                                         /* 4-byte counter */
9936                                         buf[k + j] = (u64) *offset;
9937                                         continue;
9938                                 }
9939                                 /* 8-byte counter */
9940                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9941                         }
9942                         k += BNX2X_NUM_Q_STATS;
9943                 }
9944                 if (IS_E1HMF_MODE_STAT(bp))
9945                         return;
9946                 hw_stats = (u32 *)&bp->eth_stats;
9947                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9948                         if (bnx2x_stats_arr[j].size == 0) {
9949                                 /* skip this counter */
9950                                 buf[k + j] = 0;
9951                                 continue;
9952                         }
9953                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9954                         if (bnx2x_stats_arr[j].size == 4) {
9955                                 /* 4-byte counter */
9956                                 buf[k + j] = (u64) *offset;
9957                                 continue;
9958                         }
9959                         /* 8-byte counter */
9960                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9961                 }
9962         } else {
9963                 hw_stats = (u32 *)&bp->eth_stats;
9964                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9965                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9966                                 continue;
9967                         if (bnx2x_stats_arr[i].size == 0) {
9968                                 /* skip this counter */
9969                                 buf[j] = 0;
9970                                 j++;
9971                                 continue;
9972                         }
9973                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9974                         if (bnx2x_stats_arr[i].size == 4) {
9975                                 /* 4-byte counter */
9976                                 buf[j] = (u64) *offset;
9977                                 j++;
9978                                 continue;
9979                         }
9980                         /* 8-byte counter */
9981                         buf[j] = HILO_U64(*offset, *(offset + 1));
9982                         j++;
9983                 }
9984         }
9985 }
9986
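/* ethtool LED identify: blink the port LED once a second for 'data'
 * seconds (default 2 when 0 is passed), then restore it to match the
 * current link state.  Does nothing unless this function is the port
 * management function (PMF).
 */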
9987 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9988 {
9989         struct bnx2x *bp = netdev_priv(dev);
9990         int port = BP_PORT(bp);
9991         int i;
9992
9993         if (!netif_running(dev))
9994                 return 0;
9995
9996         if (!bp->port.pmf)
9997                 return 0;
9998
9999         if (data == 0)
10000                 data = 2;
10001
10002         for (i = 0; i < (data * 2); i++) {
10003                 if ((i % 2) == 0)
10004                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10005                                       bp->link_params.hw_led_mode,
10006                                       bp->link_params.chip_id);
10007                 else
10008                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10009                                       bp->link_params.hw_led_mode,
10010                                       bp->link_params.chip_id);
10011
10012                 msleep_interruptible(500);
10013                 if (signal_pending(current))
10014                         break;
10015         }
10016
10017         if (bp->link_vars.link_up)
10018                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10019                               bp->link_vars.line_speed,
10020                               bp->link_params.hw_led_mode,
10021                               bp->link_params.chip_id);
10022
10023         return 0;
10024 }
10025
10026 static struct ethtool_ops bnx2x_ethtool_ops = {
10027         .get_settings           = bnx2x_get_settings,
10028         .set_settings           = bnx2x_set_settings,
10029         .get_drvinfo            = bnx2x_get_drvinfo,
10030         .get_regs_len           = bnx2x_get_regs_len,
10031         .get_regs               = bnx2x_get_regs,
10032         .get_wol                = bnx2x_get_wol,
10033         .set_wol                = bnx2x_set_wol,
10034         .get_msglevel           = bnx2x_get_msglevel,
10035         .set_msglevel           = bnx2x_set_msglevel,
10036         .nway_reset             = bnx2x_nway_reset,
10037         .get_link               = ethtool_op_get_link,
10038         .get_eeprom_len         = bnx2x_get_eeprom_len,
10039         .get_eeprom             = bnx2x_get_eeprom,
10040         .set_eeprom             = bnx2x_set_eeprom,
10041         .get_coalesce           = bnx2x_get_coalesce,
10042         .set_coalesce           = bnx2x_set_coalesce,
10043         .get_ringparam          = bnx2x_get_ringparam,
10044         .set_ringparam          = bnx2x_set_ringparam,
10045         .get_pauseparam         = bnx2x_get_pauseparam,
10046         .set_pauseparam         = bnx2x_set_pauseparam,
10047         .get_rx_csum            = bnx2x_get_rx_csum,
10048         .set_rx_csum            = bnx2x_set_rx_csum,
10049         .get_tx_csum            = ethtool_op_get_tx_csum,
10050         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10051         .set_flags              = bnx2x_set_flags,
10052         .get_flags              = ethtool_op_get_flags,
10053         .get_sg                 = ethtool_op_get_sg,
10054         .set_sg                 = ethtool_op_set_sg,
10055         .get_tso                = ethtool_op_get_tso,
10056         .set_tso                = bnx2x_set_tso,
10057         .self_test_count        = bnx2x_self_test_count,
10058         .self_test              = bnx2x_self_test,
10059         .get_strings            = bnx2x_get_strings,
10060         .phys_id                = bnx2x_phys_id,
10061         .get_stats_count        = bnx2x_get_stats_count,
10062         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10063 };
10064
10065 /* end of ethtool_ops */
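/* Illustrative usage (not part of the driver): these ops back the
 * standard ethtool commands, e.g. from userspace:
 *
 *   ethtool -t eth0 offline    # runs bnx2x_self_test()
 *   ethtool -S eth0            # dumps bnx2x_get_ethtool_stats()
 *   ethtool -p eth0 5          # blinks the LEDs via bnx2x_phys_id()
 */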
10066
10067 /****************************************************************************
10068 * General service functions
10069 ****************************************************************************/
10070
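/* Program the PCI PM control/status register directly: D0 clears the
 * power-state bits (and any pending PME status), with a 20ms delay
 * required when leaving D3hot; D3hot sets state 3 and, if WoL is
 * configured, the PME enable bit.
 */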
10071 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10072 {
10073         u16 pmcsr;
10074
10075         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10076
10077         switch (state) {
10078         case PCI_D0:
10079                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10080                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10081                                        PCI_PM_CTRL_PME_STATUS));
10082
10083                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10084                         /* delay required during transition out of D3hot */
10085                         msleep(20);
10086                 break;
10087
10088         case PCI_D3hot:
10089                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10090                 pmcsr |= 3;
10091
10092                 if (bp->wol)
10093                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10094
10095                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10096                                       pmcsr);
10097
10098                 /* No more memory access after this point until
10099                  * the device is brought back to D0.
10100                  */
10101                 break;
10102
10103         default:
10104                 return -EINVAL;
10105         }
10106         return 0;
10107 }
10108
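/* The last entry of each RCQ page is (by ring convention) a next-page
 * pointer rather than a real completion, so an index landing on it is
 * stepped over before comparing against our consumer index.
 */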
10109 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10110 {
10111         u16 rx_cons_sb;
10112
10113         /* Tell compiler that status block fields can change */
10114         barrier();
10115         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10116         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10117                 rx_cons_sb++;
10118         return (fp->rx_comp_cons != rx_cons_sb);
10119 }
10120
10121 /*
10122  * net_device service functions
10123  */
10124
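/* NAPI poll: refresh the fastpath status block indices, reap Tx
 * completions, process up to 'budget' Rx packets, and only when no
 * work remains complete NAPI and re-enable the IGU interrupt for this
 * status block (see the memory-ordering comment below).
 */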
10125 static int bnx2x_poll(struct napi_struct *napi, int budget)
10126 {
10127         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10128                                                  napi);
10129         struct bnx2x *bp = fp->bp;
10130         int work_done = 0;
10131
10132 #ifdef BNX2X_STOP_ON_ERROR
10133         if (unlikely(bp->panic))
10134                 goto poll_panic;
10135 #endif
10136
10137         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10138         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10139         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10140
10141         bnx2x_update_fpsb_idx(fp);
10142
10143         if (bnx2x_has_tx_work(fp))
10144                 bnx2x_tx_int(fp);
10145
10146         if (bnx2x_has_rx_work(fp)) {
10147                 work_done = bnx2x_rx_int(fp, budget);
10148
10149                 /* must not complete if we consumed the full budget */
10150                 if (work_done >= budget)
10151                         goto poll_again;
10152         }
10153
10154         /* BNX2X_HAS_WORK() reads the status block, so we need to
10155          * ensure that the status block indices have actually been read
10156          * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK),
10157          * so that we won't write the "newer" value of the status block
10158          * to the IGU (if there was a DMA right after BNX2X_HAS_WORK and
10159          * there is no rmb(), the memory read in bnx2x_update_fpsb_idx may
10160          * be postponed to right before bnx2x_ack_sb). In that case there
10161          * would never be another interrupt until the next update of the
10162          * status block, even though there is still unhandled work.
10163          */
10164         rmb();
10165
10166         if (!BNX2X_HAS_WORK(fp)) {
10167 #ifdef BNX2X_STOP_ON_ERROR
10168 poll_panic:
10169 #endif
10170                 napi_complete(napi);
10171
10172                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10173                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10174                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10175                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10176         }
10177
10178 poll_again:
10179         return work_done;
10180 }
10181
10182
10183 /* We split the first BD into a headers BD and a data BD
10184  * to ease the pain of our fellow microcode engineers;
10185  * we use one mapping for both BDs.
10186  * So far this has only been observed to happen
10187  * in Other Operating Systems(TM).
10188  */
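/* The split reuses the single DMA mapping: the headers BD keeps the
 * original address with nbytes trimmed down to hlen, while the new
 * data BD points hlen bytes further into the same mapping and carries
 * the remainder - only the BD chain changes, never the mapping.
 */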
10189 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10190                                    struct bnx2x_fastpath *fp,
10191                                    struct eth_tx_bd **tx_bd, u16 hlen,
10192                                    u16 bd_prod, int nbd)
10193 {
10194         struct eth_tx_bd *h_tx_bd = *tx_bd;
10195         struct eth_tx_bd *d_tx_bd;
10196         dma_addr_t mapping;
10197         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10198
10199         /* first fix first BD */
10200         h_tx_bd->nbd = cpu_to_le16(nbd);
10201         h_tx_bd->nbytes = cpu_to_le16(hlen);
10202
10203         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10204            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10205            h_tx_bd->addr_lo, h_tx_bd->nbd);
10206
10207         /* now get a new data BD
10208          * (after the pbd) and fill it */
10209         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10210         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10211
10212         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10213                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10214
10215         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10216         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10217         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10218         d_tx_bd->vlan = 0;
10219         /* this marks the BD as one that has no individual mapping;
10220          * the FW ignores this flag in a BD not marked start
10221          */
10222         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10223         DP(NETIF_MSG_TX_QUEUED,
10224            "TSO split data size is %d (%x:%x)\n",
10225            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10226
10227         /* update tx_bd for marking the last BD flag */
10228         *tx_bd = d_tx_bd;
10229
10230         return bd_prod;
10231 }
10232
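/* Workaround for the "HW bug" noted in the caller: the device
 * checksums from the transport header, while the stack's partial
 * checksum may start 'fix' bytes earlier (fix > 0) or later (fix < 0);
 * fold those bytes out of (or into) the sum, then byte-swap the result
 * into the order the parsing BD expects.
 */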
10233 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10234 {
10235         if (fix > 0)
10236                 csum = (u16) ~csum_fold(csum_sub(csum,
10237                                 csum_partial(t_header - fix, fix, 0)));
10238
10239         else if (fix < 0)
10240                 csum = (u16) ~csum_fold(csum_add(csum,
10241                                 csum_partial(t_header, -fix, 0)));
10242
10243         return swab16(csum);
10244 }
10245
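/* Classify an outgoing skb into a bitmask of XMIT_* flags: plain, IPv4
 * or IPv6 checksum offload (plus XMIT_CSUM_TCP for TCP), and/or GSO
 * v4/v6.  The rest of the transmit path keys off this mask.
 */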
10246 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10247 {
10248         u32 rc;
10249
10250         if (skb->ip_summed != CHECKSUM_PARTIAL)
10251                 rc = XMIT_PLAIN;
10252
10253         else {
10254                 if (skb->protocol == htons(ETH_P_IPV6)) {
10255                         rc = XMIT_CSUM_V6;
10256                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10257                                 rc |= XMIT_CSUM_TCP;
10258
10259                 } else {
10260                         rc = XMIT_CSUM_V4;
10261                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10262                                 rc |= XMIT_CSUM_TCP;
10263                 }
10264         }
10265
10266         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10267                 rc |= XMIT_GSO_V4;
10268
10269         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10270                 rc |= XMIT_GSO_V6;
10271
10272         return rc;
10273 }
10274
10275 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10276 /* Check if the packet requires linearization (it is too fragmented).
10277    No need to check fragmentation if the page size > 8K (there will
10278    be no violation of the FW restrictions). */
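/* The FW appears to fetch at most MAX_FETCH_BD BDs per packet; for LSO
 * it additionally requires that every MSS worth of payload fits within
 * a window of (MAX_FETCH_BD - 3) consecutive BDs, which the
 * sliding-window sums below verify.
 */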
10279 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10280                              u32 xmit_type)
10281 {
10282         int to_copy = 0;
10283         int hlen = 0;
10284         int first_bd_sz = 0;
10285
10286         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10287         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10288
10289                 if (xmit_type & XMIT_GSO) {
10290                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10291                         /* Check if LSO packet needs to be copied:
10292                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10293                         int wnd_size = MAX_FETCH_BD - 3;
10294                         /* Number of windows to check */
10295                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10296                         int wnd_idx = 0;
10297                         int frag_idx = 0;
10298                         u32 wnd_sum = 0;
10299
10300                         /* Headers length */
10301                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10302                                 tcp_hdrlen(skb);
10303
10304                         /* Amount of data (w/o headers) on the linear part of the SKB */
10305                         first_bd_sz = skb_headlen(skb) - hlen;
10306
10307                         wnd_sum  = first_bd_sz;
10308
10309                         /* Calculate the first sum - it's special */
10310                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10311                                 wnd_sum +=
10312                                         skb_shinfo(skb)->frags[frag_idx].size;
10313
10314                         /* If there was data in the linear part of the skb - check it */
10315                         if (first_bd_sz > 0) {
10316                                 if (unlikely(wnd_sum < lso_mss)) {
10317                                         to_copy = 1;
10318                                         goto exit_lbl;
10319                                 }
10320
10321                                 wnd_sum -= first_bd_sz;
10322                         }
10323
10324                         /* Others are easier: run through the frag list and
10325                            check all windows */
10326                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10327                                 wnd_sum +=
10328                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10329
10330                                 if (unlikely(wnd_sum < lso_mss)) {
10331                                         to_copy = 1;
10332                                         break;
10333                                 }
10334                                 wnd_sum -=
10335                                         skb_shinfo(skb)->frags[wnd_idx].size;
10336                         }
10337                 } else {
10338                         /* a non-LSO packet that is too fragmented must
10339                            always be linearized */
10340                         to_copy = 1;
10341                 }
10342         }
10343
10344 exit_lbl:
10345         if (unlikely(to_copy))
10346                 DP(NETIF_MSG_TX_QUEUED,
10347                    "Linearization IS REQUIRED for %s packet. "
10348                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10349                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10350                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10351
10352         return to_copy;
10353 }
10354 #endif
10355
10356 /* called with netif_tx_lock
10357  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10358  * netif_wake_queue()
10359  */
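/* Transmit path in brief: (possibly linearize,) claim a packet slot
 * and the start BD, optionally add a parsing BD for checksum/TSO
 * offload, map and chain the fragments, mark the last BD, then update
 * the producers and ring the doorbell (see the ordering comments
 * below).
 */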
10360 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10361 {
10362         struct bnx2x *bp = netdev_priv(dev);
10363         struct bnx2x_fastpath *fp;
10364         struct netdev_queue *txq;
10365         struct sw_tx_bd *tx_buf;
10366         struct eth_tx_bd *tx_bd;
10367         struct eth_tx_parse_bd *pbd = NULL;
10368         u16 pkt_prod, bd_prod;
10369         int nbd, fp_index;
10370         dma_addr_t mapping;
10371         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10372         int vlan_off = (bp->e1hov ? 4 : 0);
10373         int i;
10374         u8 hlen = 0;
10375
10376 #ifdef BNX2X_STOP_ON_ERROR
10377         if (unlikely(bp->panic))
10378                 return NETDEV_TX_BUSY;
10379 #endif
10380
10381         fp_index = skb_get_queue_mapping(skb);
10382         txq = netdev_get_tx_queue(dev, fp_index);
10383
10384         fp = &bp->fp[fp_index];
10385
10386         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10387                 fp->eth_q_stats.driver_xoff++;
10388                 netif_tx_stop_queue(txq);
10389                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10390                 return NETDEV_TX_BUSY;
10391         }
10392
10393         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10394            "  gso type %x  xmit_type %x\n",
10395            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10396            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10397
10398 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10399         /* First, check if we need to linearize the skb (due to FW
10400            restrictions). No need to check fragmentation if the page size > 8K
10401            (there will be no violation of the FW restrictions) */
10402         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10403                 /* Statistics of linearization */
10404                 bp->lin_cnt++;
10405                 if (skb_linearize(skb) != 0) {
10406                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10407                            "silently dropping this SKB\n");
10408                         dev_kfree_skb_any(skb);
10409                         return NETDEV_TX_OK;
10410                 }
10411         }
10412 #endif
10413
10414         /*
10415          * Please read carefully. First we use one BD which we mark as start,
10416          * then for TSO or xsum we have a parsing info BD,
10417          * and only then we have the rest of the TSO BDs.
10418          * (don't forget to mark the last one as last,
10419          * and to unmap only AFTER you write to the BD ...)
10420          * And above all, all pbd sizes are in words - NOT DWORDS!
10421          */
10422
10423         pkt_prod = fp->tx_pkt_prod++;
10424         bd_prod = TX_BD(fp->tx_bd_prod);
10425
10426         /* get a tx_buf and first BD */
10427         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10428         tx_bd = &fp->tx_desc_ring[bd_prod];
10429
10430         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10431         tx_bd->general_data = (UNICAST_ADDRESS <<
10432                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10433         /* header nbd */
10434         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10435
10436         /* remember the first BD of the packet */
10437         tx_buf->first_bd = fp->tx_bd_prod;
10438         tx_buf->skb = skb;
10439
10440         DP(NETIF_MSG_TX_QUEUED,
10441            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10442            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10443
10444 #ifdef BCM_VLAN
10445         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10446             (bp->flags & HW_VLAN_TX_FLAG)) {
10447                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10448                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10449                 vlan_off += 4;
10450         } else
10451 #endif
10452                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10453
10454         if (xmit_type) {
10455                 /* turn on parsing and get a BD */
10456                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10457                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10458
10459                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10460         }
10461
10462         if (xmit_type & XMIT_CSUM) {
10463                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10464
10465                 /* for now NS flag is not used in Linux */
10466                 pbd->global_data =
10467                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10468                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10469
10470                 pbd->ip_hlen = (skb_transport_header(skb) -
10471                                 skb_network_header(skb)) / 2;
10472
10473                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10474
10475                 pbd->total_hlen = cpu_to_le16(hlen);
10476                 hlen = hlen*2 - vlan_off;
10477
10478                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10479
10480                 if (xmit_type & XMIT_CSUM_V4)
10481                         tx_bd->bd_flags.as_bitfield |=
10482                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10483                 else
10484                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10485
10486                 if (xmit_type & XMIT_CSUM_TCP) {
10487                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10488
10489                 } else {
10490                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10491
10492                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10493                         pbd->cs_offset = fix / 2;
10494
10495                         DP(NETIF_MSG_TX_QUEUED,
10496                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10497                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10498                            SKB_CS(skb));
10499
10500                         /* HW bug: fixup the CSUM */
10501                         pbd->tcp_pseudo_csum =
10502                                 bnx2x_csum_fix(skb_transport_header(skb),
10503                                                SKB_CS(skb), fix);
10504
10505                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10506                            pbd->tcp_pseudo_csum);
10507                 }
10508         }
10509
10510         mapping = pci_map_single(bp->pdev, skb->data,
10511                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10512
10513         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10514         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10515         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10516         tx_bd->nbd = cpu_to_le16(nbd);
10517         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10518
10519         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10520            "  nbytes %d  flags %x  vlan %x\n",
10521            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10522            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10523            le16_to_cpu(tx_bd->vlan));
10524
10525         if (xmit_type & XMIT_GSO) {
10526
10527                 DP(NETIF_MSG_TX_QUEUED,
10528                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10529                    skb->len, hlen, skb_headlen(skb),
10530                    skb_shinfo(skb)->gso_size);
10531
10532                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10533
10534                 if (unlikely(skb_headlen(skb) > hlen))
10535                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10536                                                  bd_prod, ++nbd);
10537
10538                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10539                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10540                 pbd->tcp_flags = pbd_tcp_flags(skb);
10541
10542                 if (xmit_type & XMIT_GSO_V4) {
10543                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10544                         pbd->tcp_pseudo_csum =
10545                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10546                                                           ip_hdr(skb)->daddr,
10547                                                           0, IPPROTO_TCP, 0));
10548
10549                 } else
10550                         pbd->tcp_pseudo_csum =
10551                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10552                                                         &ipv6_hdr(skb)->daddr,
10553                                                         0, IPPROTO_TCP, 0));
10554
10555                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10556         }
10557
10558         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10559                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10560
10561                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10562                 tx_bd = &fp->tx_desc_ring[bd_prod];
10563
10564                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10565                                        frag->size, PCI_DMA_TODEVICE);
10566
10567                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10568                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10569                 tx_bd->nbytes = cpu_to_le16(frag->size);
10570                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10571                 tx_bd->bd_flags.as_bitfield = 0;
10572
10573                 DP(NETIF_MSG_TX_QUEUED,
10574                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10575                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10576                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10577         }
10578
10579         /* now at last mark the BD as the last BD */
10580         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10581
10582         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10583            tx_bd, tx_bd->bd_flags.as_bitfield);
10584
10585         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10586
10587         /* now send a tx doorbell, counting the next-page pointer BD
10588          * if the packet's BD chain contains or ends with it
10589          */
10590         if (TX_BD_POFF(bd_prod) < nbd)
10591                 nbd++;
10592
10593         if (pbd)
10594                 DP(NETIF_MSG_TX_QUEUED,
10595                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10596                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10597                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10598                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10599                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10600
10601         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10602
10603         /*
10604          * Make sure that the BD data is updated before updating the producer
10605          * since FW might read the BD right after the producer is updated.
10606          * This is only applicable for weak-ordered memory model archs such
10607          * as IA-64. The following barrier is also mandatory since the FW
10608          * assumes packets must have BDs.
10609          */
10610         wmb();
10611
10612         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10613         mb(); /* FW restriction: must not reorder writing nbd and packets */
10614         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10615         DOORBELL(bp, fp->index, 0);
10616
10617         mmiowb();
10618
10619         fp->tx_bd_prod += nbd;
10620
10621         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10622                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10623                    if we put Tx into XOFF state. */
10624                 smp_mb();
10625                 netif_tx_stop_queue(txq);
10626                 fp->eth_q_stats.driver_xoff++;
10627                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10628                         netif_tx_wake_queue(txq);
10629         }
10630         fp->tx_pkt++;
10631
10632         return NETDEV_TX_OK;
10633 }
10634
10635 /* called with rtnl_lock */
10636 static int bnx2x_open(struct net_device *dev)
10637 {
10638         struct bnx2x *bp = netdev_priv(dev);
10639
10640         netif_carrier_off(dev);
10641
10642         bnx2x_set_power_state(bp, PCI_D0);
10643
10644         return bnx2x_nic_load(bp, LOAD_OPEN);
10645 }
10646
10647 /* called with rtnl_lock */
10648 static int bnx2x_close(struct net_device *dev)
10649 {
10650         struct bnx2x *bp = netdev_priv(dev);
10651
10652         /* Unload the driver, release IRQs */
10653         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10654         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10655                 if (!CHIP_REV_IS_SLOW(bp))
10656                         bnx2x_set_power_state(bp, PCI_D3hot);
10657
10658         return 0;
10659 }
10660
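/* Rx filtering policy: promiscuous if requested; accept-all-multicast
 * for IFF_ALLMULTI (or on E1 when the list exceeds BNX2X_MAX_MULTICAST
 * CAM entries); otherwise program exact-match CAM entries via a
 * SET_MAC ramrod on E1, or the MC_HASH_SIZE x 32-bit multicast hash
 * filter on E1H.
 */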
10661 /* called with netif_tx_lock from dev_mcast.c */
10662 static void bnx2x_set_rx_mode(struct net_device *dev)
10663 {
10664         struct bnx2x *bp = netdev_priv(dev);
10665         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10666         int port = BP_PORT(bp);
10667
10668         if (bp->state != BNX2X_STATE_OPEN) {
10669                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10670                 return;
10671         }
10672
10673         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10674
10675         if (dev->flags & IFF_PROMISC)
10676                 rx_mode = BNX2X_RX_MODE_PROMISC;
10677
10678         else if ((dev->flags & IFF_ALLMULTI) ||
10679                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10680                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10681
10682         else { /* some multicasts */
10683                 if (CHIP_IS_E1(bp)) {
10684                         int i, old, offset;
10685                         struct dev_mc_list *mclist;
10686                         struct mac_configuration_cmd *config =
10687                                                 bnx2x_sp(bp, mcast_config);
10688
10689                         for (i = 0, mclist = dev->mc_list;
10690                              mclist && (i < dev->mc_count);
10691                              i++, mclist = mclist->next) {
10692
10693                                 config->config_table[i].
10694                                         cam_entry.msb_mac_addr =
10695                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10696                                 config->config_table[i].
10697                                         cam_entry.middle_mac_addr =
10698                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10699                                 config->config_table[i].
10700                                         cam_entry.lsb_mac_addr =
10701                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10702                                 config->config_table[i].cam_entry.flags =
10703                                                         cpu_to_le16(port);
10704                                 config->config_table[i].
10705                                         target_table_entry.flags = 0;
10706                                 config->config_table[i].
10707                                         target_table_entry.client_id = 0;
10708                                 config->config_table[i].
10709                                         target_table_entry.vlan_id = 0;
10710
10711                                 DP(NETIF_MSG_IFUP,
10712                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10713                                    config->config_table[i].
10714                                                 cam_entry.msb_mac_addr,
10715                                    config->config_table[i].
10716                                                 cam_entry.middle_mac_addr,
10717                                    config->config_table[i].
10718                                                 cam_entry.lsb_mac_addr);
10719                         }
10720                         old = config->hdr.length;
10721                         if (old > i) {
10722                                 for (; i < old; i++) {
10723                                         if (CAM_IS_INVALID(config->
10724                                                            config_table[i])) {
10725                                                 /* already invalidated */
10726                                                 break;
10727                                         }
10728                                         /* invalidate */
10729                                         CAM_INVALIDATE(config->
10730                                                        config_table[i]);
10731                                 }
10732                         }
10733
10734                         if (CHIP_REV_IS_SLOW(bp))
10735                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10736                         else
10737                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10738
10739                         config->hdr.length = i;
10740                         config->hdr.offset = offset;
10741                         config->hdr.client_id = bp->fp->cl_id;
10742                         config->hdr.reserved1 = 0;
10743
10744                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10745                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10746                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10747                                       0);
10748                 } else { /* E1H */
10749                         /* Accept one or more multicasts */
10750                         struct dev_mc_list *mclist;
10751                         u32 mc_filter[MC_HASH_SIZE];
10752                         u32 crc, bit, regidx;
10753                         int i;
10754
10755                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10756
10757                         for (i = 0, mclist = dev->mc_list;
10758                              mclist && (i < dev->mc_count);
10759                              i++, mclist = mclist->next) {
10760
10761                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10762                                    mclist->dmi_addr);
10763
10764                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10765                                 bit = (crc >> 24) & 0xff;
10766                                 regidx = bit >> 5;
10767                                 bit &= 0x1f;
10768                                 mc_filter[regidx] |= (1 << bit);
10769                         }
10770
10771                         for (i = 0; i < MC_HASH_SIZE; i++)
10772                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10773                                        mc_filter[i]);
10774                 }
10775         }
10776
10777         bp->rx_mode = rx_mode;
10778         bnx2x_set_storm_rx_mode(bp);
10779 }
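
/* Worked example of the E1H multicast hash above (address and CRC values
 * illustrative): crc32c_le() runs over the 6 MAC bytes and the top 8 CRC
 * bits select one of 256 filter bits.  If (crc >> 24) == 0x4a (74), then
 * regidx = 74 >> 5 = 2 and bit = 74 & 0x1f = 10, i.e. bit 10 of MC_HASH
 * register 2 is set and any multicast hashing there is accepted.
 */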
10780
10781 /* called with rtnl_lock */
10782 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10783 {
10784         struct sockaddr *addr = p;
10785         struct bnx2x *bp = netdev_priv(dev);
10786
10787         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10788                 return -EINVAL;
10789
10790         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10791         if (netif_running(dev)) {
10792                 if (CHIP_IS_E1(bp))
10793                         bnx2x_set_mac_addr_e1(bp, 1);
10794                 else
10795                         bnx2x_set_mac_addr_e1h(bp, 1);
10796         }
10797
10798         return 0;
10799 }
10800
10801 /* called with rtnl_lock */
10802 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10803 {
10804         struct mii_ioctl_data *data = if_mii(ifr);
10805         struct bnx2x *bp = netdev_priv(dev);
10806         int port = BP_PORT(bp);
10807         int err;
10808
10809         switch (cmd) {
10810         case SIOCGMIIPHY:
10811                 data->phy_id = bp->port.phy_addr;
10812
10813                 /* fallthrough */
10814
10815         case SIOCGMIIREG: {
10816                 u16 mii_regval;
10817
10818                 if (!netif_running(dev))
10819                         return -EAGAIN;
10820
10821                 mutex_lock(&bp->port.phy_mutex);
10822                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10823                                       DEFAULT_PHY_DEV_ADDR,
10824                                       (data->reg_num & 0x1f), &mii_regval);
10825                 data->val_out = mii_regval;
10826                 mutex_unlock(&bp->port.phy_mutex);
10827                 return err;
10828         }
10829
10830         case SIOCSMIIREG:
10831                 if (!capable(CAP_NET_ADMIN))
10832                         return -EPERM;
10833
10834                 if (!netif_running(dev))
10835                         return -EAGAIN;
10836
10837                 mutex_lock(&bp->port.phy_mutex);
10838                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10839                                        DEFAULT_PHY_DEV_ADDR,
10840                                        (data->reg_num & 0x1f), data->val_in);
10841                 mutex_unlock(&bp->port.phy_mutex);
10842                 return err;
10843
10844         default:
10845                 /* do nothing */
10846                 break;
10847         }
10848
10849         return -EOPNOTSUPP;
10850 }
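
/* Hypothetical user-space sketch of exercising the MII ioctls handled
 * above (program and interface name are illustrative):
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	-- fills mii->phy_id
 *	mii->reg_num = 1;		-- MII status register, say
 *	ioctl(fd, SIOCGMIIREG, &ifr);	-- value returned in mii->val_out
 *
 * The SIOCGMIIPHY case deliberately falls through to SIOCGMIIREG, so a
 * single call both reports the PHY address and reads a register.
 */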
10851
10852 /* called with rtnl_lock */
10853 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10854 {
10855         struct bnx2x *bp = netdev_priv(dev);
10856         int rc = 0;
10857
10858         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10859             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10860                 return -EINVAL;
10861
10862         /* This does not race with packet allocation
10863          * because the actual alloc size is
10864          * only updated as part of load
10865          */
10866         dev->mtu = new_mtu;
10867
10868         if (netif_running(dev)) {
10869                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10870                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10871         }
10872
10873         return rc;
10874 }
10875
10876 static void bnx2x_tx_timeout(struct net_device *dev)
10877 {
10878         struct bnx2x *bp = netdev_priv(dev);
10879
10880 #ifdef BNX2X_STOP_ON_ERROR
10881         if (!bp->panic)
10882                 bnx2x_panic();
10883 #endif
10884         /* This allows the netif to be shut down gracefully before resetting */
10885         schedule_work(&bp->reset_task);
10886 }
10887
10888 #ifdef BCM_VLAN
10889 /* called with rtnl_lock */
10890 static void bnx2x_vlan_rx_register(struct net_device *dev,
10891                                    struct vlan_group *vlgrp)
10892 {
10893         struct bnx2x *bp = netdev_priv(dev);
10894
10895         bp->vlgrp = vlgrp;
10896
10897         /* Set flags according to the required capabilities */
10898         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10899
10900         if (dev->features & NETIF_F_HW_VLAN_TX)
10901                 bp->flags |= HW_VLAN_TX_FLAG;
10902
10903         if (dev->features & NETIF_F_HW_VLAN_RX)
10904                 bp->flags |= HW_VLAN_RX_FLAG;
10905
10906         if (netif_running(dev))
10907                 bnx2x_set_client_config(bp);
10908 }
10909
10910 #endif
10911
10912 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10913 static void poll_bnx2x(struct net_device *dev)
10914 {
10915         struct bnx2x *bp = netdev_priv(dev);
10916
10917         disable_irq(bp->pdev->irq);
10918         bnx2x_interrupt(bp->pdev->irq, dev);
10919         enable_irq(bp->pdev->irq);
10920 }
10921 #endif
10922
10923 static const struct net_device_ops bnx2x_netdev_ops = {
10924         .ndo_open               = bnx2x_open,
10925         .ndo_stop               = bnx2x_close,
10926         .ndo_start_xmit         = bnx2x_start_xmit,
10927         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10928         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10929         .ndo_validate_addr      = eth_validate_addr,
10930         .ndo_do_ioctl           = bnx2x_ioctl,
10931         .ndo_change_mtu         = bnx2x_change_mtu,
10932         .ndo_tx_timeout         = bnx2x_tx_timeout,
10933 #ifdef BCM_VLAN
10934         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10935 #endif
10936 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10937         .ndo_poll_controller    = poll_bnx2x,
10938 #endif
10939 };
10940
10941 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10942                                     struct net_device *dev)
10943 {
10944         struct bnx2x *bp;
10945         int rc;
10946
10947         SET_NETDEV_DEV(dev, &pdev->dev);
10948         bp = netdev_priv(dev);
10949
10950         bp->dev = dev;
10951         bp->pdev = pdev;
10952         bp->flags = 0;
10953         bp->func = PCI_FUNC(pdev->devfn);
10954
10955         rc = pci_enable_device(pdev);
10956         if (rc) {
10957                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10958                 goto err_out;
10959         }
10960
10961         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10962                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10963                        " aborting\n");
10964                 rc = -ENODEV;
10965                 goto err_out_disable;
10966         }
10967
10968         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10969                 printk(KERN_ERR PFX "Cannot find second PCI device"
10970                        " base address, aborting\n");
10971                 rc = -ENODEV;
10972                 goto err_out_disable;
10973         }
10974
10975         if (atomic_read(&pdev->enable_cnt) == 1) {
10976                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10977                 if (rc) {
10978                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10979                                " aborting\n");
10980                         goto err_out_disable;
10981                 }
10982
10983                 pci_set_master(pdev);
10984                 pci_save_state(pdev);
10985         }
10986
10987         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10988         if (bp->pm_cap == 0) {
10989                 printk(KERN_ERR PFX "Cannot find power management"
10990                        " capability, aborting\n");
10991                 rc = -EIO;
10992                 goto err_out_release;
10993         }
10994
10995         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10996         if (bp->pcie_cap == 0) {
10997                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10998                        " aborting\n");
10999                 rc = -EIO;
11000                 goto err_out_release;
11001         }
11002
11003         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11004                 bp->flags |= USING_DAC_FLAG;
11005                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11006                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11007                                " failed, aborting\n");
11008                         rc = -EIO;
11009                         goto err_out_release;
11010                 }
11011
11012         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11013                 printk(KERN_ERR PFX "System does not support DMA,"
11014                        " aborting\n");
11015                 rc = -EIO;
11016                 goto err_out_release;
11017         }
11018
11019         dev->mem_start = pci_resource_start(pdev, 0);
11020         dev->base_addr = dev->mem_start;
11021         dev->mem_end = pci_resource_end(pdev, 0);
11022
11023         dev->irq = pdev->irq;
11024
11025         bp->regview = pci_ioremap_bar(pdev, 0);
11026         if (!bp->regview) {
11027                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11028                 rc = -ENOMEM;
11029                 goto err_out_release;
11030         }
11031
11032         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11033                                         min_t(u64, BNX2X_DB_SIZE,
11034                                               pci_resource_len(pdev, 2)));
11035         if (!bp->doorbells) {
11036                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11037                 rc = -ENOMEM;
11038                 goto err_out_unmap;
11039         }
11040
11041         bnx2x_set_power_state(bp, PCI_D0);
11042
11043         /* clean indirect addresses */
11044         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11045                                PCICFG_VENDOR_ID_OFFSET);
11046         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11047         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11048         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11049         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11050
11051         dev->watchdog_timeo = TX_TIMEOUT;
11052
11053         dev->netdev_ops = &bnx2x_netdev_ops;
11054         dev->ethtool_ops = &bnx2x_ethtool_ops;
11055         dev->features |= NETIF_F_SG;
11056         dev->features |= NETIF_F_HW_CSUM;
11057         if (bp->flags & USING_DAC_FLAG)
11058                 dev->features |= NETIF_F_HIGHDMA;
11059 #ifdef BCM_VLAN
11060         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11061         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11062 #endif
11063         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11064         dev->features |= NETIF_F_TSO6;
11065
11066         return 0;
11067
11068 err_out_unmap:
11069         if (bp->regview) {
11070                 iounmap(bp->regview);
11071                 bp->regview = NULL;
11072         }
11073         if (bp->doorbells) {
11074                 iounmap(bp->doorbells);
11075                 bp->doorbells = NULL;
11076         }
11077
11078 err_out_release:
11079         if (atomic_read(&pdev->enable_cnt) == 1)
11080                 pci_release_regions(pdev);
11081
11082 err_out_disable:
11083         pci_disable_device(pdev);
11084         pci_set_drvdata(pdev, NULL);
11085
11086 err_out:
11087         return rc;
11088 }
11089
11090 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11091 {
11092         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11093
11094         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11095         return val;
11096 }
11097
11098 /* return value of 1=2.5GHz 2=5GHz */
11099 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11100 {
11101         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11102
11103         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11104         return val;
11105 }
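
/* Both helpers above decode the PCIe link control/status dword: after the
 * mask and shift, width comes back as the negotiated lane count (e.g. 8
 * for a x8 link) and speed as the encoded generation (1 or 2, per the
 * comment above).
 */
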
11106 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11107 {
11108         struct bnx2x_fw_file_hdr *fw_hdr;
11109         struct bnx2x_fw_file_section *sections;
11110         u16 *ops_offsets;
11111         u32 offset, len, num_ops;
11112         int i;
11113         const struct firmware *firmware = bp->firmware;
11114         const u8 *fw_ver;
11115
11116         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11117                 return -EINVAL;
11118
11119         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11120         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11121
11122         /* Make sure none of the offsets and sizes make us read beyond
11123          * the end of the firmware data */
11124         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11125                 offset = be32_to_cpu(sections[i].offset);
11126                 len = be32_to_cpu(sections[i].len);
11127                 if (offset + len > firmware->size) {
11128                         printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11129                         return -EINVAL;
11130                 }
11131         }
11132
11133         /* Likewise for the init_ops offsets */
11134         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11135         ops_offsets = (u16 *)(firmware->data + offset);
11136         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11137
11138         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11139                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11140                         printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11141                         return -EINVAL;
11142                 }
11143         }
11144
11145         /* Check FW version */
11146         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11147         fw_ver = firmware->data + offset;
11148         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11149             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11150             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11151             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11152                 printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
11153                                     " Should be %d.%d.%d.%d\n",
11154                        fw_ver[0], fw_ver[1], fw_ver[2],
11155                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11156                        BCM_5710_FW_MINOR_VERSION,
11157                        BCM_5710_FW_REVISION_VERSION,
11158                        BCM_5710_FW_ENGINEERING_VERSION);
11159                 return -EINVAL;
11160         }
11161
11162         return 0;
11163 }
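
/* The checks above rely on the layout from bnx2x_fw_file_hdr.h: the file
 * begins with a header that is a dense array of section descriptors, each
 * a pair of big-endian words (offset and length), which is why fw_hdr can
 * be recast as a section array and walked sizeof(*fw_hdr)/sizeof(*sections)
 * times.
 */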
11164
11165 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11166 {
11167         u32 i;
11168         const __be32 *source = (const __be32 *)_source;
11169         u32 *target = (u32 *)_target;
11170
11171         for (i = 0; i < n/4; i++)
11172                 target[i] = be32_to_cpu(source[i]);
11173 }
11174
11175 /*
11176  * Ops array is stored in the following format:
11177  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11178  */
11179 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11180 {
11181         u32 i, j, tmp;
11182         const __be32 *source = (const __be32 *)_source;
11183         struct raw_op *target = (struct raw_op *)_target;
11184
11185         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11186                 tmp = be32_to_cpu(source[j]);
11187                 target[i].op = (tmp >> 24) & 0xff;
11188                 target[i].offset = tmp & 0xffffff;
11189                 target[i].raw_data = be32_to_cpu(source[j+1]);
11190         }
11191 }
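
/* Worked example for bnx2x_prep_ops() (bytes illustrative): the pair of
 * big-endian words 0x05000040 0x00000001 decodes to op = 0x05,
 * offset = 0x000040 and raw_data = 0x00000001, i.e. each 8-byte file
 * entry becomes exactly one struct raw_op.
 */
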
11192 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11193 {
11194         u32 i;
11195         u16 *target = (u16 *)_target;
11196         const __be16 *source = (const __be16 *)_source;
11197
11198         for (i = 0; i < n/2; i++)
11199                 target[i] = be16_to_cpu(source[i]);
11200 }
11201
11202 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11203         do {   \
11204                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11205                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11206                 if (!bp->arr) { \
11207                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11208                         goto lbl; \
11209                 } \
11210                 func(bp->firmware->data + \
11211                         be32_to_cpu(fw_hdr->arr.offset), \
11212                         (u8 *)bp->arr, len); \
11213         } while (0)
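
/* For reference, one invocation of the macro above expands roughly to
 * (error printk omitted):
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */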
11214
11215
11216 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11217 {
11218         char fw_file_name[40] = {0};
11219         int rc, offset;
11220         struct bnx2x_fw_file_hdr *fw_hdr;
11221
11222         /* Create a FW file name */
11223         if (CHIP_IS_E1(bp))
11224                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11225         else
11226                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11227
11228         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11229                 BCM_5710_FW_MAJOR_VERSION,
11230                 BCM_5710_FW_MINOR_VERSION,
11231                 BCM_5710_FW_REVISION_VERSION,
11232                 BCM_5710_FW_ENGINEERING_VERSION);
11233
11234         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11235
11236         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11237         if (rc) {
11238                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11239                 goto request_firmware_exit;
11240         }
11241
11242         rc = bnx2x_check_firmware(bp);
11243         if (rc) {
11244                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11245                 goto request_firmware_exit;
11246         }
11247
11248         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11249
11250         /* Initialize the pointers to the init arrays */
11251         /* Blob */
11252         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11253
11254         /* Opcodes */
11255         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11256
11257         /* Offsets */
11258         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11259
11260         /* STORMs firmware */
11261         bp->tsem_int_table_data = bp->firmware->data +
11262                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11263         bp->tsem_pram_data      = bp->firmware->data +
11264                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11265         bp->usem_int_table_data = bp->firmware->data +
11266                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11267         bp->usem_pram_data      = bp->firmware->data +
11268                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11269         bp->xsem_int_table_data = bp->firmware->data +
11270                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11271         bp->xsem_pram_data      = bp->firmware->data +
11272                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11273         bp->csem_int_table_data = bp->firmware->data +
11274                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11275         bp->csem_pram_data      = bp->firmware->data +
11276                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11277
11278         return 0;
11279 init_offsets_alloc_err:
11280         kfree(bp->init_ops);
11281 init_ops_alloc_err:
11282         kfree(bp->init_data);
11283 request_firmware_exit:
11284         release_firmware(bp->firmware);
11285
11286         return rc;
11287 }
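
/* The name built in bnx2x_init_firmware() follows the pattern
 * "bnx2x-e1[h]-<major>.<minor>.<rev>.<eng>.fw"; an E1H part with firmware
 * 4.8.53.0, say, would request "bnx2x-e1h-4.8.53.0.fw" (digits
 * illustrative, taken from the BCM_5710_FW_*_VERSION constants).
 */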
11288
11289
11290
11291 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11292                                     const struct pci_device_id *ent)
11293 {
11294         static int version_printed;
11295         struct net_device *dev = NULL;
11296         struct bnx2x *bp;
11297         int rc;
11298
11299         if (version_printed++ == 0)
11300                 printk(KERN_INFO "%s", version);
11301
11302         /* dev is zeroed in alloc_etherdev_mq */
11303         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11304         if (!dev) {
11305                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11306                 return -ENOMEM;
11307         }
11308
11309         bp = netdev_priv(dev);
11310         bp->msglevel = debug;
11311
11312         rc = bnx2x_init_dev(pdev, dev);
11313         if (rc < 0) {
11314                 free_netdev(dev);
11315                 return rc;
11316         }
11317
11318         pci_set_drvdata(pdev, dev);
11319
11320         rc = bnx2x_init_bp(bp);
11321         if (rc)
11322                 goto init_one_exit;
11323
11324         /* Set init arrays */
11325         rc = bnx2x_init_firmware(bp, &pdev->dev);
11326         if (rc) {
11327                 printk(KERN_ERR PFX "Error loading firmware\n");
11328                 goto init_one_exit;
11329         }
11330
11331         rc = register_netdev(dev);
11332         if (rc) {
11333                 dev_err(&pdev->dev, "Cannot register net device\n");
11334                 goto init_one_exit;
11335         }
11336
11337         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11338                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11339                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11340                bnx2x_get_pcie_width(bp),
11341                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11342                dev->base_addr, bp->pdev->irq);
11343         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11344
11345         return 0;
11346
11347 init_one_exit:
11348         if (bp->regview)
11349                 iounmap(bp->regview);
11350
11351         if (bp->doorbells)
11352                 iounmap(bp->doorbells);
11353
11354         free_netdev(dev);
11355
11356         if (atomic_read(&pdev->enable_cnt) == 1)
11357                 pci_release_regions(pdev);
11358
11359         pci_disable_device(pdev);
11360         pci_set_drvdata(pdev, NULL);
11361
11362         return rc;
11363 }
11364
11365 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11366 {
11367         struct net_device *dev = pci_get_drvdata(pdev);
11368         struct bnx2x *bp;
11369
11370         if (!dev) {
11371                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11372                 return;
11373         }
11374         bp = netdev_priv(dev);
11375
11376         unregister_netdev(dev);
11377
11378         kfree(bp->init_ops_offsets);
11379         kfree(bp->init_ops);
11380         kfree(bp->init_data);
11381         release_firmware(bp->firmware);
11382
11383         if (bp->regview)
11384                 iounmap(bp->regview);
11385
11386         if (bp->doorbells)
11387                 iounmap(bp->doorbells);
11388
11389         free_netdev(dev);
11390
11391         if (atomic_read(&pdev->enable_cnt) == 1)
11392                 pci_release_regions(pdev);
11393
11394         pci_disable_device(pdev);
11395         pci_set_drvdata(pdev, NULL);
11396 }
11397
11398 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11399 {
11400         struct net_device *dev = pci_get_drvdata(pdev);
11401         struct bnx2x *bp;
11402
11403         if (!dev) {
11404                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11405                 return -ENODEV;
11406         }
11407         bp = netdev_priv(dev);
11408
11409         rtnl_lock();
11410
11411         pci_save_state(pdev);
11412
11413         if (!netif_running(dev)) {
11414                 rtnl_unlock();
11415                 return 0;
11416         }
11417
11418         netif_device_detach(dev);
11419
11420         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11421
11422         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11423
11424         rtnl_unlock();
11425
11426         return 0;
11427 }
11428
11429 static int bnx2x_resume(struct pci_dev *pdev)
11430 {
11431         struct net_device *dev = pci_get_drvdata(pdev);
11432         struct bnx2x *bp;
11433         int rc;
11434
11435         if (!dev) {
11436                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11437                 return -ENODEV;
11438         }
11439         bp = netdev_priv(dev);
11440
11441         rtnl_lock();
11442
11443         pci_restore_state(pdev);
11444
11445         if (!netif_running(dev)) {
11446                 rtnl_unlock();
11447                 return 0;
11448         }
11449
11450         bnx2x_set_power_state(bp, PCI_D0);
11451         netif_device_attach(dev);
11452
11453         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11454
11455         rtnl_unlock();
11456
11457         return rc;
11458 }
11459
11460 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11461 {
11462         int i;
11463
11464         bp->state = BNX2X_STATE_ERROR;
11465
11466         bp->rx_mode = BNX2X_RX_MODE_NONE;
11467
11468         bnx2x_netif_stop(bp, 0);
11469
11470         del_timer_sync(&bp->timer);
11471         bp->stats_state = STATS_STATE_DISABLED;
11472         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11473
11474         /* Release IRQs */
11475         bnx2x_free_irq(bp);
11476
11477         if (CHIP_IS_E1(bp)) {
11478                 struct mac_configuration_cmd *config =
11479                                                 bnx2x_sp(bp, mcast_config);
11480
11481                 for (i = 0; i < config->hdr.length; i++)
11482                         CAM_INVALIDATE(config->config_table[i]);
11483         }
11484
11485         /* Free SKBs, SGEs, TPA pool and driver internals */
11486         bnx2x_free_skbs(bp);
11487         for_each_rx_queue(bp, i)
11488                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11489         for_each_rx_queue(bp, i)
11490                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11491         bnx2x_free_mem(bp);
11492
11493         bp->state = BNX2X_STATE_CLOSED;
11494
11495         netif_carrier_off(bp->dev);
11496
11497         return 0;
11498 }
11499
11500 static void bnx2x_eeh_recover(struct bnx2x *bp)
11501 {
11502         u32 val;
11503
11504         mutex_init(&bp->port.phy_mutex);
11505
11506         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11507         bp->link_params.shmem_base = bp->common.shmem_base;
11508         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11509
11510         if (!bp->common.shmem_base ||
11511             (bp->common.shmem_base < 0xA0000) ||
11512             (bp->common.shmem_base >= 0xC0000)) {
11513                 BNX2X_DEV_INFO("MCP not active\n");
11514                 bp->flags |= NO_MCP_FLAG;
11515                 return;
11516         }
11517
11518         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11519         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11520                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11521                 BNX2X_ERR("BAD MCP validity signature\n");
11522
11523         if (!BP_NOMCP(bp)) {
11524                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11525                               & DRV_MSG_SEQ_NUMBER_MASK);
11526                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11527         }
11528 }
11529
11530 /**
11531  * bnx2x_io_error_detected - called when PCI error is detected
11532  * @pdev: Pointer to PCI device
11533  * @state: The current pci connection state
11534  *
11535  * This function is called after a PCI bus error affecting
11536  * this device has been detected.
11537  */
11538 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11539                                                 pci_channel_state_t state)
11540 {
11541         struct net_device *dev = pci_get_drvdata(pdev);
11542         struct bnx2x *bp = netdev_priv(dev);
11543
11544         rtnl_lock();
11545
11546         netif_device_detach(dev);
11547
11548         if (netif_running(dev))
11549                 bnx2x_eeh_nic_unload(bp);
11550
11551         pci_disable_device(pdev);
11552
11553         rtnl_unlock();
11554
11555         /* Request a slot reset */
11556         return PCI_ERS_RESULT_NEED_RESET;
11557 }
11558
11559 /**
11560  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11561  * @pdev: Pointer to PCI device
11562  *
11563  * Restart the card from scratch, as if from a cold-boot.
11564  * Restart the card from scratch, as if from a cold boot.
11565 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11566 {
11567         struct net_device *dev = pci_get_drvdata(pdev);
11568         struct bnx2x *bp = netdev_priv(dev);
11569
11570         rtnl_lock();
11571
11572         if (pci_enable_device(pdev)) {
11573                 dev_err(&pdev->dev,
11574                         "Cannot re-enable PCI device after reset\n");
11575                 rtnl_unlock();
11576                 return PCI_ERS_RESULT_DISCONNECT;
11577         }
11578
11579         pci_set_master(pdev);
11580         pci_restore_state(pdev);
11581
11582         if (netif_running(dev))
11583                 bnx2x_set_power_state(bp, PCI_D0);
11584
11585         rtnl_unlock();
11586
11587         return PCI_ERS_RESULT_RECOVERED;
11588 }
11589
11590 /**
11591  * bnx2x_io_resume - called when traffic can start flowing again
11592  * @pdev: Pointer to PCI device
11593  *
11594  * This callback is called when the error recovery driver tells us that
11595  * its OK to resume normal operation.
11596  * it's OK to resume normal operation.
11597 static void bnx2x_io_resume(struct pci_dev *pdev)
11598 {
11599         struct net_device *dev = pci_get_drvdata(pdev);
11600         struct bnx2x *bp = netdev_priv(dev);
11601
11602         rtnl_lock();
11603
11604         bnx2x_eeh_recover(bp);
11605
11606         if (netif_running(dev))
11607                 bnx2x_nic_load(bp, LOAD_NORMAL);
11608
11609         netif_device_attach(dev);
11610
11611         rtnl_unlock();
11612 }
11613
11614 static struct pci_error_handlers bnx2x_err_handler = {
11615         .error_detected = bnx2x_io_error_detected,
11616         .slot_reset     = bnx2x_io_slot_reset,
11617         .resume         = bnx2x_io_resume,
11618 };
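
/* Recovery sequence implied by the handler table above: the PCI core
 * first calls .error_detected (device detached, NEED_RESET requested),
 * then .slot_reset once the link has been reset (device re-enabled and
 * state restored), and finally .resume when traffic may flow again (NIC
 * reloaded and netdev re-attached).
 */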
11619
11620 static struct pci_driver bnx2x_pci_driver = {
11621         .name        = DRV_MODULE_NAME,
11622         .id_table    = bnx2x_pci_tbl,
11623         .probe       = bnx2x_init_one,
11624         .remove      = __devexit_p(bnx2x_remove_one),
11625         .suspend     = bnx2x_suspend,
11626         .resume      = bnx2x_resume,
11627         .err_handler = &bnx2x_err_handler,
11628 };
11629
11630 static int __init bnx2x_init(void)
11631 {
11632         int ret;
11633
11634         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11635         if (bnx2x_wq == NULL) {
11636                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11637                 return -ENOMEM;
11638         }
11639
11640         ret = pci_register_driver(&bnx2x_pci_driver);
11641         if (ret) {
11642                 printk(KERN_ERR PFX "Cannot register driver\n");
11643                 destroy_workqueue(bnx2x_wq);
11644         }
11645         return ret;
11646 }
11647
11648 static void __exit bnx2x_cleanup(void)
11649 {
11650         pci_unregister_driver(&bnx2x_pci_driver);
11651
11652         destroy_workqueue(bnx2x_wq);
11653 }
11654
11655 module_init(bnx2x_init);
11656 module_exit(bnx2x_cleanup);
11657
11658