drivers/net: use vzalloc()
[pandora-kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/checksum.h>
43 #include <linux/workqueue.h>
44 #include <linux/crc32.h>
45 #include <linux/prefetch.h>
46 #include <linux/cache.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50
51 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
52 #define BCM_CNIC 1
53 #include "cnic_if.h"
54 #endif
55 #include "bnx2.h"
56 #include "bnx2_fw.h"
57
58 #define DRV_MODULE_NAME         "bnx2"
59 #define DRV_MODULE_VERSION      "2.0.18"
60 #define DRV_MODULE_RELDATE      "Oct 7, 2010"
61 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.0.15.fw"
62 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
63 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.0.17.fw"
64 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
66
67 #define RUN_AT(x) (jiffies + (x))
68
69 /* Time in jiffies before concluding the transmitter is hung. */
70 #define TX_TIMEOUT  (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
74
75 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
76 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(DRV_MODULE_VERSION);
79 MODULE_FIRMWARE(FW_MIPS_FILE_06);
80 MODULE_FIRMWARE(FW_RV2P_FILE_06);
81 MODULE_FIRMWARE(FW_MIPS_FILE_09);
82 MODULE_FIRMWARE(FW_RV2P_FILE_09);
83 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
84
85 static int disable_msi = 0;
86
87 module_param(disable_msi, int, 0);
88 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89
/* Board identifiers used as indices into board_info[] below and as the
 * driver_data field of bnx2_pci_tbl entries.  The order here must stay
 * in sync with board_info[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
103
/* Human-readable board names, indexed by board_t, above.  Entry order
 * must match the board_t enumeration exactly.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
120
/* PCI device match table.  HP OEM variants of the 5706/5706S are
 * distinguished by their HP subsystem IDs and must therefore be listed
 * before the generic PCI_ANY_ID entries for the same device IDs.
 * The final field maps each match to its board_t.  Zero entry
 * terminates the table.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* define; raw IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
146
/* Supported NVRAM/flash parts.  Each entry carries five controller
 * setup words (presumably strapping and config/write register values —
 * see struct flash_spec in bnx2.h for the exact field names), followed
 * by buffering flags, page geometry, the byte address mask, total size
 * in bytes (0 for unpopulated expansion entries), and a display name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
235
/* 5709 parts use a single fixed buffered-flash layout instead of the
 * strap-probed flash_table above, so only the geometry fields are set.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
246
247 static void bnx2_init_napi(struct bnx2 *bp);
248 static void bnx2_del_napi(struct bnx2 *bp);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
255         barrier();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
266         return bp->tx_ring_size - diff;
267 }
268
/* Indirectly read a chip register via the PCI config register window.
 * indirect_lock serializes the window-address/window-data pair against
 * concurrent indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
280
/* Indirectly write a chip register via the PCI config register window.
 * Counterpart of bnx2_reg_rd_ind(); same locking rules apply.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
289
/* Write a word into the firmware shared-memory region (shmem_base
 * relative), using the indirect register access path.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
295
/* Read a word from the firmware shared-memory region (shmem_base
 * relative), using the indirect register access path.
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
301
/* Write a value into on-chip context memory at cid_addr + offset.
 * On 5709 the access goes through the CTX_CTX_DATA/CTX_CTX_CTRL pair
 * and completion is polled (up to 5 iterations, 5us apart); older
 * chips use the simpler CTX_DATA_ADR/CTX_DATA pair.  indirect_lock
 * serializes against other indirect accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to clear the write-request bit. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330         struct bnx2 *bp = netdev_priv(dev);
331         struct drv_ctl_io *io = &info->data.io;
332
333         switch (info->cmd) {
334         case DRV_CTL_IO_WR_CMD:
335                 bnx2_reg_wr_ind(bp, io->offset, io->data);
336                 break;
337         case DRV_CTL_IO_RD_CMD:
338                 io->data = bnx2_reg_rd_ind(bp, io->offset);
339                 break;
340         case DRV_CTL_CTX_WR_CMD:
341                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342                 break;
343         default:
344                 return -EINVAL;
345         }
346         return 0;
347 }
348
/* Fill in the cnic irq descriptor from the driver's current interrupt
 * mode.  With MSI-X, cnic gets its own vector (index bp->irq_nvecs)
 * and the matching per-vector status block; otherwise it shares vector
 * 0 and the base status block, and cnic_present/cnic_tag are set so
 * the shared-vector path will service cnic events.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out contiguously at MSIX alignment. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
375
/* Register the cnic driver's ops with this device.  Publishes the ops
 * pointer via rcu_assign_pointer so RCU readers see fully-initialized
 * state, then hands cnic its irq/status-block info.
 * Returns -EINVAL on NULL ops, -EBUSY if already registered.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
398
/* Unregister the cnic driver.  Clears registration state under
 * cnic_lock, then synchronize_rcu() waits for any in-flight RCU
 * readers of cnic_ops before returning.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
413
/* Exported probe hook called by the cnic driver: fills in and returns
 * this device's cnic_eth_dev descriptor (chip id, PCI device, mapped
 * register base, and the register/unregister/ctl callbacks).
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
430
/* Notify a registered cnic driver (if any) that the device is
 * stopping.  cnic_lock keeps cnic_ops stable across the callback.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
445
/* Notify a registered cnic driver (if any) that the device has
 * started.  In shared-vector (non-MSI-X) mode, cnic_tag is resynced
 * to the current status index first so cnic polling resumes cleanly.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
465
466 #else
467
/* No-op stub: cnic support (BCM_CNIC) not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
472
/* No-op stub: cnic support (BCM_CNIC) not compiled in. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
477
478 #endif
479
/* Read PHY register @reg over MDIO into *val.
 *
 * If the chip is auto-polling the PHY, auto-poll is disabled for the
 * duration of the manual transaction and re-enabled afterwards (each
 * mode change is flushed with a read-back plus 40us settle delay).
 * The transaction is polled for completion up to 50 x 10us.
 * Returns 0 on success; -EBUSY (with *val zeroed) on timeout.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the read: PHY address, register, command, busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data field once idle. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
536
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-poll is suspended around the manual
 * transaction if active, and completion is polled up to 50 x 10us.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the write: address, register, data, command, busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
585
586 static void
587 bnx2_disable_int(struct bnx2 *bp)
588 {
589         int i;
590         struct bnx2_napi *bnapi;
591
592         for (i = 0; i < bp->irq_nvecs; i++) {
593                 bnapi = &bp->bnx2_napi[i];
594                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
595                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
596         }
597         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
598 }
599
600 static void
601 bnx2_enable_int(struct bnx2 *bp)
602 {
603         int i;
604         struct bnx2_napi *bnapi;
605
606         for (i = 0; i < bp->irq_nvecs; i++) {
607                 bnapi = &bp->bnx2_napi[i];
608
609                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
610                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
611                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
612                        bnapi->last_status_idx);
613
614                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
615                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
616                        bnapi->last_status_idx);
617         }
618         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
619 }
620
/* Disable interrupts and wait for any in-flight handlers to finish.
 * intr_sem is bumped first so the irq handler ignores late events;
 * if the device isn't running there is nothing further to quiesce.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
634
635 static void
636 bnx2_napi_disable(struct bnx2 *bp)
637 {
638         int i;
639
640         for (i = 0; i < bp->irq_nvecs; i++)
641                 napi_disable(&bp->bnx2_napi[i].napi);
642 }
643
644 static void
645 bnx2_napi_enable(struct bnx2 *bp)
646 {
647         int i;
648
649         for (i = 0; i < bp->irq_nvecs; i++)
650                 napi_enable(&bp->bnx2_napi[i].napi);
651 }
652
/* Quiesce the interface: optionally stop cnic first, then disable
 * NAPI and the tx queues, synchronously mask interrupts, and drop the
 * carrier so the stack's tx watchdog does not fire while stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
665
666 static void
667 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
668 {
669         if (atomic_dec_and_test(&bp->intr_sem)) {
670                 if (netif_running(bp->dev)) {
671                         netif_tx_wake_all_queues(bp->dev);
672                         spin_lock_bh(&bp->phy_lock);
673                         if (bp->link_up)
674                                 netif_carrier_on(bp->dev);
675                         spin_unlock_bh(&bp->phy_lock);
676                         bnx2_napi_enable(bp);
677                         bnx2_enable_int(bp);
678                         if (start_cnic)
679                                 bnx2_cnic_start(bp);
680                 }
681         }
682 }
683
684 static void
685 bnx2_free_tx_mem(struct bnx2 *bp)
686 {
687         int i;
688
689         for (i = 0; i < bp->num_tx_rings; i++) {
690                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
691                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
692
693                 if (txr->tx_desc_ring) {
694                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
695                                           txr->tx_desc_ring,
696                                           txr->tx_desc_mapping);
697                         txr->tx_desc_ring = NULL;
698                 }
699                 kfree(txr->tx_buf_ring);
700                 txr->tx_buf_ring = NULL;
701         }
702 }
703
704 static void
705 bnx2_free_rx_mem(struct bnx2 *bp)
706 {
707         int i;
708
709         for (i = 0; i < bp->num_rx_rings; i++) {
710                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
711                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
712                 int j;
713
714                 for (j = 0; j < bp->rx_max_ring; j++) {
715                         if (rxr->rx_desc_ring[j])
716                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
717                                                   rxr->rx_desc_ring[j],
718                                                   rxr->rx_desc_mapping[j]);
719                         rxr->rx_desc_ring[j] = NULL;
720                 }
721                 vfree(rxr->rx_buf_ring);
722                 rxr->rx_buf_ring = NULL;
723
724                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
725                         if (rxr->rx_pg_desc_ring[j])
726                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
727                                                   rxr->rx_pg_desc_ring[j],
728                                                   rxr->rx_pg_desc_mapping[j]);
729                         rxr->rx_pg_desc_ring[j] = NULL;
730                 }
731                 vfree(rxr->rx_pg_ring);
732                 rxr->rx_pg_ring = NULL;
733         }
734 }
735
736 static int
737 bnx2_alloc_tx_mem(struct bnx2 *bp)
738 {
739         int i;
740
741         for (i = 0; i < bp->num_tx_rings; i++) {
742                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
743                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
744
745                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
746                 if (txr->tx_buf_ring == NULL)
747                         return -ENOMEM;
748
749                 txr->tx_desc_ring =
750                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
751                                            &txr->tx_desc_mapping, GFP_KERNEL);
752                 if (txr->tx_desc_ring == NULL)
753                         return -ENOMEM;
754         }
755         return 0;
756 }
757
758 static int
759 bnx2_alloc_rx_mem(struct bnx2 *bp)
760 {
761         int i;
762
763         for (i = 0; i < bp->num_rx_rings; i++) {
764                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
765                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
766                 int j;
767
768                 rxr->rx_buf_ring =
769                         vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
770                 if (rxr->rx_buf_ring == NULL)
771                         return -ENOMEM;
772
773                 for (j = 0; j < bp->rx_max_ring; j++) {
774                         rxr->rx_desc_ring[j] =
775                                 dma_alloc_coherent(&bp->pdev->dev,
776                                                    RXBD_RING_SIZE,
777                                                    &rxr->rx_desc_mapping[j],
778                                                    GFP_KERNEL);
779                         if (rxr->rx_desc_ring[j] == NULL)
780                                 return -ENOMEM;
781
782                 }
783
784                 if (bp->rx_pg_ring_size) {
785                         rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
786                                                   bp->rx_max_pg_ring);
787                         if (rxr->rx_pg_ring == NULL)
788                                 return -ENOMEM;
789
790                 }
791
792                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
793                         rxr->rx_pg_desc_ring[j] =
794                                 dma_alloc_coherent(&bp->pdev->dev,
795                                                    RXBD_RING_SIZE,
796                                                    &rxr->rx_pg_desc_mapping[j],
797                                                    GFP_KERNEL);
798                         if (rxr->rx_pg_desc_ring[j] == NULL)
799                                 return -ENOMEM;
800
801                 }
802         }
803         return 0;
804 }
805
/* Free everything bnx2_alloc_mem() allocated: tx/rx rings, the 5709
 * context-memory pages, and the combined status + statistics block
 * (stats_blk points into the same allocation, so it is only NULLed).
 * Safe on partially-allocated state.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
831
/* Allocate all host memory for the device:
 *  - one coherent-DMA allocation holding the status block(s) (one
 *    aligned sub-block per MSI-X vector when MSIX_CAP is set) followed
 *    by the statistics block;
 *  - on 5709, 0x2000 bytes of context memory split into
 *    BCM_PAGE_SIZE pages;
 *  - the rx and tx rings via their helpers.
 * Returns 0 on success.  Any failure frees everything allocated so
 * far via bnx2_free_mem() and returns -ENOMEM.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get aligned sub-blocks within the
		 * same allocation.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
909
910 static void
911 bnx2_report_fw_link(struct bnx2 *bp)
912 {
913         u32 fw_link_status = 0;
914
915         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
916                 return;
917
918         if (bp->link_up) {
919                 u32 bmsr;
920
921                 switch (bp->line_speed) {
922                 case SPEED_10:
923                         if (bp->duplex == DUPLEX_HALF)
924                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
925                         else
926                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
927                         break;
928                 case SPEED_100:
929                         if (bp->duplex == DUPLEX_HALF)
930                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
931                         else
932                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
933                         break;
934                 case SPEED_1000:
935                         if (bp->duplex == DUPLEX_HALF)
936                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
937                         else
938                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
939                         break;
940                 case SPEED_2500:
941                         if (bp->duplex == DUPLEX_HALF)
942                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
943                         else
944                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
945                         break;
946                 }
947
948                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
949
950                 if (bp->autoneg) {
951                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
952
953                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
954                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955
956                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
957                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
958                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
959                         else
960                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
961                 }
962         }
963         else
964                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
965
966         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
967 }
968
969 static char *
970 bnx2_xceiver_str(struct bnx2 *bp)
971 {
972         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
973                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
974                  "Copper");
975 }
976
977 static void
978 bnx2_report_link(struct bnx2 *bp)
979 {
980         if (bp->link_up) {
981                 netif_carrier_on(bp->dev);
982                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
983                             bnx2_xceiver_str(bp),
984                             bp->line_speed,
985                             bp->duplex == DUPLEX_FULL ? "full" : "half");
986
987                 if (bp->flow_ctrl) {
988                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
989                                 pr_cont(", receive ");
990                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
991                                         pr_cont("& transmit ");
992                         }
993                         else {
994                                 pr_cont(", transmit ");
995                         }
996                         pr_cont("flow control ON");
997                 }
998                 pr_cont("\n");
999         } else {
1000                 netif_carrier_off(bp->dev);
1001                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1002                            bnx2_xceiver_str(bp));
1003         }
1004
1005         bnx2_report_fw_link(bp);
1006 }
1007
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause is only negotiated when both speed and flow-control
	 * autoneg are enabled; otherwise honor the requested setting
	 * (full duplex only -- pause is not used at half duplex).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes block reports the already-resolved pause
	 * result directly in the 1000X_STAT1 register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate the 1000BASE-X pause bits into their copper
	 * equivalents so a single resolution table below covers
	 * both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1083
1084 static int
1085 bnx2_5709s_linkup(struct bnx2 *bp)
1086 {
1087         u32 val, speed;
1088
1089         bp->link_up = 1;
1090
1091         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1092         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1093         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1094
1095         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1096                 bp->line_speed = bp->req_line_speed;
1097                 bp->duplex = bp->req_duplex;
1098                 return 0;
1099         }
1100         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1101         switch (speed) {
1102                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1103                         bp->line_speed = SPEED_10;
1104                         break;
1105                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1106                         bp->line_speed = SPEED_100;
1107                         break;
1108                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1109                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1110                         bp->line_speed = SPEED_1000;
1111                         break;
1112                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1113                         bp->line_speed = SPEED_2500;
1114                         break;
1115         }
1116         if (val & MII_BNX2_GP_TOP_AN_FD)
1117                 bp->duplex = DUPLEX_FULL;
1118         else
1119                 bp->duplex = DUPLEX_HALF;
1120         return 0;
1121 }
1122
1123 static int
1124 bnx2_5708s_linkup(struct bnx2 *bp)
1125 {
1126         u32 val;
1127
1128         bp->link_up = 1;
1129         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1130         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1131                 case BCM5708S_1000X_STAT1_SPEED_10:
1132                         bp->line_speed = SPEED_10;
1133                         break;
1134                 case BCM5708S_1000X_STAT1_SPEED_100:
1135                         bp->line_speed = SPEED_100;
1136                         break;
1137                 case BCM5708S_1000X_STAT1_SPEED_1G:
1138                         bp->line_speed = SPEED_1000;
1139                         break;
1140                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1141                         bp->line_speed = SPEED_2500;
1142                         break;
1143         }
1144         if (val & BCM5708S_1000X_STAT1_FD)
1145                 bp->duplex = DUPLEX_FULL;
1146         else
1147                 bp->duplex = DUPLEX_HALF;
1148
1149         return 0;
1150 }
1151
1152 static int
1153 bnx2_5706s_linkup(struct bnx2 *bp)
1154 {
1155         u32 bmcr, local_adv, remote_adv, common;
1156
1157         bp->link_up = 1;
1158         bp->line_speed = SPEED_1000;
1159
1160         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1161         if (bmcr & BMCR_FULLDPLX) {
1162                 bp->duplex = DUPLEX_FULL;
1163         }
1164         else {
1165                 bp->duplex = DUPLEX_HALF;
1166         }
1167
1168         if (!(bmcr & BMCR_ANENABLE)) {
1169                 return 0;
1170         }
1171
1172         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1173         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1174
1175         common = local_adv & remote_adv;
1176         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1177
1178                 if (common & ADVERTISE_1000XFULL) {
1179                         bp->duplex = DUPLEX_FULL;
1180                 }
1181                 else {
1182                         bp->duplex = DUPLEX_HALF;
1183                 }
1184         }
1185
1186         return 0;
1187 }
1188
1189 static int
1190 bnx2_copper_linkup(struct bnx2 *bp)
1191 {
1192         u32 bmcr;
1193
1194         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1195         if (bmcr & BMCR_ANENABLE) {
1196                 u32 local_adv, remote_adv, common;
1197
1198                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1199                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1200
1201                 common = local_adv & (remote_adv >> 2);
1202                 if (common & ADVERTISE_1000FULL) {
1203                         bp->line_speed = SPEED_1000;
1204                         bp->duplex = DUPLEX_FULL;
1205                 }
1206                 else if (common & ADVERTISE_1000HALF) {
1207                         bp->line_speed = SPEED_1000;
1208                         bp->duplex = DUPLEX_HALF;
1209                 }
1210                 else {
1211                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1212                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1213
1214                         common = local_adv & remote_adv;
1215                         if (common & ADVERTISE_100FULL) {
1216                                 bp->line_speed = SPEED_100;
1217                                 bp->duplex = DUPLEX_FULL;
1218                         }
1219                         else if (common & ADVERTISE_100HALF) {
1220                                 bp->line_speed = SPEED_100;
1221                                 bp->duplex = DUPLEX_HALF;
1222                         }
1223                         else if (common & ADVERTISE_10FULL) {
1224                                 bp->line_speed = SPEED_10;
1225                                 bp->duplex = DUPLEX_FULL;
1226                         }
1227                         else if (common & ADVERTISE_10HALF) {
1228                                 bp->line_speed = SPEED_10;
1229                                 bp->duplex = DUPLEX_HALF;
1230                         }
1231                         else {
1232                                 bp->line_speed = 0;
1233                                 bp->link_up = 0;
1234                         }
1235                 }
1236         }
1237         else {
1238                 if (bmcr & BMCR_SPEED100) {
1239                         bp->line_speed = SPEED_100;
1240                 }
1241                 else {
1242                         bp->line_speed = SPEED_10;
1243                 }
1244                 if (bmcr & BMCR_FULLDPLX) {
1245                         bp->duplex = DUPLEX_FULL;
1246                 }
1247                 else {
1248                         bp->duplex = DUPLEX_HALF;
1249                 }
1250         }
1251
1252         return 0;
1253 }
1254
1255 static void
1256 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1257 {
1258         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1259
1260         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1261         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1262         val |= 0x02 << 8;
1263
1264         if (bp->flow_ctrl & FLOW_CTRL_TX)
1265                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1266
1267         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1268 }
1269
1270 static void
1271 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1272 {
1273         int i;
1274         u32 cid;
1275
1276         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1277                 if (i == 1)
1278                         cid = RX_RSS_CID;
1279                 bnx2_init_rx_context(bp, cid);
1280         }
1281 }
1282
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Program the EMAC TX lengths register; 0x26ff is used only for
	 * the 1 Gbps half-duplex case, 0x2620 otherwise.
	 * NOTE(review): the field layout of BNX2_EMAC_TX_LENGTHS is not
	 * visible here; magic values preserved as-is.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	/* Select the port interface mode matching the link speed;
	 * GMII is the default when the link is down.
	 */
	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Rewrite the rx contexts so the new flow-control setting
	 * reaches the per-ring context memory.
	 */
	bnx2_init_all_rx_contexts(bp);
}
1349
1350 static void
1351 bnx2_enable_bmsr1(struct bnx2 *bp)
1352 {
1353         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1354             (CHIP_NUM(bp) == CHIP_NUM_5709))
1355                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1356                                MII_BNX2_BLK_ADDR_GP_STATUS);
1357 }
1358
1359 static void
1360 bnx2_disable_bmsr1(struct bnx2 *bp)
1361 {
1362         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363             (CHIP_NUM(bp) == CHIP_NUM_5709))
1364                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1366 }
1367
1368 static int
1369 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1370 {
1371         u32 up1;
1372         int ret = 1;
1373
1374         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1375                 return 0;
1376
1377         if (bp->autoneg & AUTONEG_SPEED)
1378                 bp->advertising |= ADVERTISED_2500baseX_Full;
1379
1380         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1381                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1382
1383         bnx2_read_phy(bp, bp->mii_up1, &up1);
1384         if (!(up1 & BCM5708S_UP1_2G5)) {
1385                 up1 |= BCM5708S_UP1_2G5;
1386                 bnx2_write_phy(bp, bp->mii_up1, up1);
1387                 ret = 0;
1388         }
1389
1390         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1391                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1392                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1393
1394         return ret;
1395 }
1396
1397 static int
1398 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1399 {
1400         u32 up1;
1401         int ret = 0;
1402
1403         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1404                 return 0;
1405
1406         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1407                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1408
1409         bnx2_read_phy(bp, bp->mii_up1, &up1);
1410         if (up1 & BCM5708S_UP1_2G5) {
1411                 up1 &= ~BCM5708S_UP1_2G5;
1412                 bnx2_write_phy(bp, bp->mii_up1, up1);
1413                 ret = 1;
1414         }
1415
1416         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1417                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1418                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1419
1420         return ret;
1421 }
1422
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is written on every path that reaches its use below;
	 * uninitialized_var() only silences a false compiler warning.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* 5709: set the force-2.5G bits in the SERDES_DIG MISC1
		 * register, then return to the default register bank.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		/* 5708: the force-2.5G bit lives directly in BMCR. */
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no forced-2.5G support. */
		return;
	}

	if (err)
		return;

	/* Forcing a speed is incompatible with autoneg; clear it and
	 * apply the requested duplex instead.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1466
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is written on every path that reaches its use below;
	 * uninitialized_var() only silences a false compiler warning.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* 5709: clear the force bit in the SERDES_DIG MISC1
		 * register, then return to the default register bank.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		/* 5708: the force-2.5G bit lives directly in BMCR. */
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no forced-2.5G support. */
		return;
	}

	if (err)
		return;

	/* Resume autonegotiation at 1 Gbps if autoneg is configured. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1505
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	/* Access the SERDES control expansion register via the DSP
	 * address/data port pair (read-modify-write).
	 */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		/* Clear bits 0x00f0 to release the forced-down state.
		 * NOTE(review): bit meanings inferred from usage; the
		 * SERDES_CTL layout is not visible in this file.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		/* Set bits 0xc0 to force the SerDes link down. */
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1518
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode, always report the link as up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* The remote-PHY firmware handles link management itself. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the register latches link-down events, so
	 * the second read reflects the current state.  On 5709 SerDes
	 * the register lives in an alternate bank selected by
	 * bnx2_enable_bmsr1().
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes: override the BMSR link bit using the
		 * EMAC status and the autoneg debug shadow register.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is read twice as well to clear latched state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link went away while in parallel-detect mode;
		 * re-enable autonegotiation.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log actual link-state transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1602
1603 static int
1604 bnx2_reset_phy(struct bnx2 *bp)
1605 {
1606         int i;
1607         u32 reg;
1608
1609         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1610
1611 #define PHY_RESET_MAX_WAIT 100
1612         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1613                 udelay(10);
1614
1615                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1616                 if (!(reg & BMCR_RESET)) {
1617                         udelay(20);
1618                         break;
1619                 }
1620         }
1621         if (i == PHY_RESET_MAX_WAIT) {
1622                 return -EBUSY;
1623         }
1624         return 0;
1625 }
1626
1627 static u32
1628 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1629 {
1630         u32 adv = 0;
1631
1632         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1633                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1634
1635                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1636                         adv = ADVERTISE_1000XPAUSE;
1637                 }
1638                 else {
1639                         adv = ADVERTISE_PAUSE_CAP;
1640                 }
1641         }
1642         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1643                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644                         adv = ADVERTISE_1000XPSE_ASYM;
1645                 }
1646                 else {
1647                         adv = ADVERTISE_PAUSE_ASYM;
1648                 }
1649         }
1650         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1651                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1653                 }
1654                 else {
1655                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1656                 }
1657         }
1658         return adv;
1659 }
1660
1661 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1662
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	/* Build the NETLINK_SET_LINK argument word for the firmware
	 * from the current autoneg/speed configuration.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex value. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Translate the MII pause advertisement into firmware flags;
	 * either the SerDes or the copper bit form may be present.
	 */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop phy_lock while waiting for the firmware to process the
	 * command (see the __releases/__acquires annotations above).
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1721
/* Configure the SerDes PHY for either forced speed/duplex or
 * autonegotiation, per bp->autoneg and the bp->req_* settings.
 * Delegates to the firmware-managed path when the remote-PHY
 * capability flag is set.
 *
 * Called with bp->phy_lock held; the lock is dropped temporarily
 * around msleep() (hence the sparse __releases/__acquires
 * annotations).  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G mode on/off requires the link to be
		 * retrained, so remember whether we must bounce it. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear BMCR bit 13.  NOTE(review): magic
				 * mask — confirm against 5709S PHY docs. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Only restart AN if the advertisement changed or AN is off. */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1838
/* All fibre link modes to advertise; includes 2.5G only when the PHY
 * is 2.5G-capable.  Expands to an expression that reads a local `bp`. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper link modes (ethtool ADVERTISED_* flags). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks for 10/100 and 1000 modes. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1853
1854 static void
1855 bnx2_set_default_remote_link(struct bnx2 *bp)
1856 {
1857         u32 link;
1858
1859         if (bp->phy_port == PORT_TP)
1860                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1861         else
1862                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1863
1864         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1865                 bp->req_line_speed = 0;
1866                 bp->autoneg |= AUTONEG_SPEED;
1867                 bp->advertising = ADVERTISED_Autoneg;
1868                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1869                         bp->advertising |= ADVERTISED_10baseT_Half;
1870                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1871                         bp->advertising |= ADVERTISED_10baseT_Full;
1872                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1873                         bp->advertising |= ADVERTISED_100baseT_Half;
1874                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1875                         bp->advertising |= ADVERTISED_100baseT_Full;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1877                         bp->advertising |= ADVERTISED_1000baseT_Full;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1879                         bp->advertising |= ADVERTISED_2500baseX_Full;
1880         } else {
1881                 bp->autoneg = 0;
1882                 bp->advertising = 0;
1883                 bp->req_duplex = DUPLEX_FULL;
1884                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1885                         bp->req_line_speed = SPEED_10;
1886                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1887                                 bp->req_duplex = DUPLEX_HALF;
1888                 }
1889                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1890                         bp->req_line_speed = SPEED_100;
1891                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1892                                 bp->req_duplex = DUPLEX_HALF;
1893                 }
1894                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1895                         bp->req_line_speed = SPEED_1000;
1896                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1897                         bp->req_line_speed = SPEED_2500;
1898         }
1899 }
1900
1901 static void
1902 bnx2_set_default_link(struct bnx2 *bp)
1903 {
1904         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1905                 bnx2_set_default_remote_link(bp);
1906                 return;
1907         }
1908
1909         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1910         bp->req_line_speed = 0;
1911         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1912                 u32 reg;
1913
1914                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1915
1916                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1917                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1918                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1919                         bp->autoneg = 0;
1920                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1921                         bp->req_duplex = DUPLEX_FULL;
1922                 }
1923         } else
1924                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1925 }
1926
/* Advance the driver pulse sequence number and write it to the
 * firmware pulse mailbox in shared memory so the bootcode knows the
 * driver is alive.  The write goes through the PCICFG register
 * window, so indirect_lock serializes the address/data pair.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	/* Window address must be programmed before the data write. */
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1940
/* Process a link event reported by the firmware-managed (remote) PHY:
 * decode the shared-memory link status word into link state, speed,
 * duplex, flow control and port type, report any link change, and
 * reprogram the MAC accordingly.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect changes */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case deliberately falls through to the
		 * matching full-duplex case to pick up the line speed. */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fallthrough */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the user's forced setting unless both
		 * speed and flow-control autoneg are enabled, in which case
		 * take the negotiated result from the status word. */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change invalidates the default link setup. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2017
2018 static int
2019 bnx2_set_remote_link(struct bnx2 *bp)
2020 {
2021         u32 evt_code;
2022
2023         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2024         switch (evt_code) {
2025                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2026                         bnx2_remote_phy_event(bp);
2027                         break;
2028                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2029                 default:
2030                         bnx2_send_heart_beat(bp);
2031                         break;
2032         }
2033         return 0;
2034 }
2035
/* Configure the copper PHY: restart autonegotiation with the
 * requested advertisement, or force speed/duplex when autoneg is off.
 * Called with bp->phy_lock held; dropped around the msleep() used to
 * force the link down (hence the sparse annotations).  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask current advertisement down to the bits we manage so
		 * it can be compared against the new value. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when something actually changed or
		 * autoneg is currently disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2134
2135 static int
2136 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2137 __releases(&bp->phy_lock)
2138 __acquires(&bp->phy_lock)
2139 {
2140         if (bp->loopback == MAC_LOOPBACK)
2141                 return 0;
2142
2143         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2144                 return bnx2_setup_serdes_phy(bp, port);
2145         }
2146         else {
2147                 return bnx2_setup_copper_phy(bp);
2148         }
2149 }
2150
/* Initialize the 5709 SerDes PHY.  The 5709S exposes its IEEE
 * registers at an offset of 0x10, so the mii_* register map is
 * redirected first; the rest programs fiber mode, 2.5G advertisement
 * and next-page/CL73 autoneg support through the banked register
 * blocks selected via MII_BNX2_BLK_ADDR.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE-standard registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fiber mode, no auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2200
/* Initialize the 5708 SerDes PHY: fiber mode with auto-detect, PLL
 * early-lock detect, optional 2.5G advertisement, plus chip-revision
 * and backplane TX-amplitude tuning from NVRAM config.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the NVRAM-provided TX control value on backplane boards. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2258
/* Initialize the 5706 SerDes PHY.  Clears parallel-detect state and
 * programs the extended-packet-length / link control shadow registers
 * (0x18/0x1c) depending on whether jumbo frames (mtu > 1500) are in
 * use.  NOTE(review): the 0x18/0x1c values are vendor magic — confirm
 * against Broadcom PHY documentation before changing.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2296
/* Initialize the copper PHY: apply the CRC workaround and early-DAC
 * disable when flagged, set/clear the extended-packet-length bit
 * according to the MTU, and enable ethernet@wirespeed.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-supplied DSP write sequence for the CRC
		 * workaround.  NOTE(review): undocumented register
		 * values — do not alter. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2348
2349
/* Top-level PHY initialization: reset the MII register map to the
 * IEEE defaults, enable link attentions, read the PHY ID and run the
 * chip-specific SerDes or copper init, then program the link via
 * bnx2_setup_phy().  Firmware-managed PHYs skip the local probe.
 *
 * Called with bp->phy_lock held; callees may drop and re-acquire it.
 * Returns 0 on success or the error from the init/setup step.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE register offsets; chip-specific init may remap. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2395
2396 static int
2397 bnx2_set_mac_loopback(struct bnx2 *bp)
2398 {
2399         u32 mac_mode;
2400
2401         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2402         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2403         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2404         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2405         bp->link_up = 1;
2406         return 0;
2407 }
2408
2409 static int bnx2_test_link(struct bnx2 *);
2410
2411 static int
2412 bnx2_set_phy_loopback(struct bnx2 *bp)
2413 {
2414         u32 mac_mode;
2415         int rc, i;
2416
2417         spin_lock_bh(&bp->phy_lock);
2418         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2419                             BMCR_SPEED1000);
2420         spin_unlock_bh(&bp->phy_lock);
2421         if (rc)
2422                 return rc;
2423
2424         for (i = 0; i < 10; i++) {
2425                 if (bnx2_test_link(bp) == 0)
2426                         break;
2427                 msleep(100);
2428         }
2429
2430         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2432                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2433                       BNX2_EMAC_MODE_25G_MODE);
2434
2435         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2436         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2437         bp->link_up = 1;
2438         return 0;
2439 }
2440
/* Send a command to the bootcode through the driver mailbox and,
 * when @ack is set, wait up to BNX2_FW_ACK_TIME_OUT_MS for the
 * firmware to echo the sequence number back.  A WAIT0 command is
 * treated as fire-and-forget once the ack arrives (or not).
 *
 * Returns 0 on success, -EBUSY on ack timeout (after notifying the
 * firmware of the timeout), or -EIO if the firmware reported a
 * non-OK status.  @silent suppresses the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the command with a fresh sequence number so the ack can
	 * be matched. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2485
/* Initialize the 5709 context memory: kick off the hardware memory
 * init, wait for it to complete, then zero each host context page and
 * program its DMA address into the chip's host page table, polling
 * for each write request to be consumed.
 *
 * Returns 0 on success, -EBUSY if the hardware init or a page-table
 * write times out, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* page size encoding */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to clear the MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the low/high halves of the page's DMA address,
		 * then issue the page-table write request. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the chip has consumed the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2533
/* Zero out all 96 connection contexts in on-chip context memory
 * (pre-5709 chips; the 5709 keeps contexts in host memory, see
 * bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0: virtual CIDs with bit 3 set map to a
			 * different physical CID range — presumably a
			 * hardware erratum workaround; TODO confirm.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE bytes, cleared through the
		 * context window one PHY_CTX_SIZE page at a time.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2576
/* Work around bad on-chip RX buffer memory: repeatedly ask the RBUF
 * allocator for buffers, remember the good ones (bit 9 clear), then
 * free only the good ones back — the bad blocks stay permanently
 * allocated so the chip never hands them out again.
 *
 * Returns 0 on success or -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): assumes the chip never reports more than 512
	 * free mbufs — confirm against the RBUF free count for this
	 * hardware.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2627
2628 static void
2629 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2630 {
2631         u32 val;
2632
2633         val = (mac_addr[0] << 8) | mac_addr[1];
2634
2635         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2636
2637         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2638                 (mac_addr[4] << 8) | mac_addr[5];
2639
2640         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2641 }
2642
/* Allocate a page for slot @index of the RX page ring, DMA-map it and
 * publish its address in the matching buffer descriptor.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, or -EIO
 * if the DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Record the page and its DMA address so the completion path
	 * can unmap and free it later.
	 */
	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* The BD carries the 64-bit DMA address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2667
2668 static void
2669 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2670 {
2671         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2672         struct page *page = rx_pg->page;
2673
2674         if (!page)
2675                 return;
2676
2677         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2678                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2679
2680         __free_page(page);
2681         rx_pg->page = NULL;
2682 }
2683
/* Allocate an skb for slot @index of the RX ring, align its data,
 * DMA-map it and publish the address in the buffer descriptor.  Also
 * advances rxr->rx_prod_bseq by the buffer size on success.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, or -EIO
 * if the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Round skb->data up to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	/* The chip DMAs an l2_fhdr at the start of the buffer. */
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2719
2720 static int
2721 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2722 {
2723         struct status_block *sblk = bnapi->status_blk.msi;
2724         u32 new_link_state, old_link_state;
2725         int is_set = 1;
2726
2727         new_link_state = sblk->status_attn_bits & event;
2728         old_link_state = sblk->status_attn_bits_ack & event;
2729         if (new_link_state != old_link_state) {
2730                 if (new_link_state)
2731                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2732                 else
2733                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2734         } else
2735                 is_set = 0;
2736
2737         return is_set;
2738 }
2739
/* Service PHY attention events under bp->phy_lock: a link state
 * change (handled by bnx2_set_link()) and/or a timer abort event
 * (handled by bnx2_set_remote_link()).
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2753
2754 static inline u16
2755 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2756 {
2757         u16 cons;
2758
2759         /* Tell compiler that status block fields can change. */
2760         barrier();
2761         cons = *bnapi->hw_tx_cons_ptr;
2762         barrier();
2763         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2764                 cons++;
2765         return cons;
2766 }
2767
/* TX completion handler: reclaim up to @budget completed packets from
 * this bnapi's TX ring, unmapping their DMA buffers and freeing the
 * skbs, then wake the matching netdev queue if it was stopped and
 * enough descriptors are now free.  Returns the number of packets
 * reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnapi instance services the TX queue of the same index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Stop if the last BD of this GSO packet has not
			 * completed yet (s16 casts make the comparison
			 * safe across index wrap).
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap the fragment BDs that follow the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index in case more completed. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2858
/* Recycle @count RX page-ring entries from the consumer side back to
 * the producer side without allocating new pages (error / allocation
 * failure paths).  If @skb is non-NULL, its last page frag is first
 * handed back to the ring and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA mapping and the BD address from
		 * the consumer slot to the producer slot when they differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2914
/* Return the RX skb from consumer slot @cons to producer slot @prod
 * without reallocating (used when the packet is copied or dropped).
 * Hands the header area back to the device, advances rx_prod_bseq,
 * and moves the DMA mapping and BD address when @cons != @prod.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Only the header region was synced for the CPU, so only that
	 * much needs syncing back for the device.
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2945
/* Finish reception of one packet into @skb.  A replacement skb is
 * allocated for the ring slot first; on failure the original skb (and
 * any page-ring entries it would have used) are recycled and an error
 * is returned.  For split-header/jumbo packets (@hdr_len != 0) the
 * payload beyond the header is attached as page frags pulled from the
 * page ring, each page being replaced as it is consumed.
 *
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.  The trailing 4-byte frame CRC
 * is trimmed here.  Returns 0 on success or a negative errno on
 * allocation failure.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size includes the 4-byte CRC, trimmed below. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The remaining fragment holds only (part of) the
			 * CRC: trim it from the skb and recycle the unused
			 * pages instead of attaching them.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3045
3046 static inline u16
3047 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3048 {
3049         u16 cons;
3050
3051         /* Tell compiler that status block fields can change. */
3052         barrier();
3053         cons = *bnapi->hw_rx_cons_ptr;
3054         barrier();
3055         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3056                 cons++;
3057         return cons;
3058 }
3059
/* RX completion handler: process up to @budget received packets from
 * this bnapi's RX ring.  Small packets (<= rx_copy_thresh) are copied
 * into a fresh skb and the original buffer recycled; larger ones go
 * through bnx2_rx_skb().  Handles error frames, VLAN tag extraction,
 * checksum offload and RX hashing before handing the skb to
 * napi_gro_receive().  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		next_rx_buf =
			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
		prefetch(next_rx_buf->desc);

		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only the l2_fhdr + copy-threshold header area needs
		 * syncing here; the rest stays mapped for the device.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Split-header and jumbo frames carry payload in the
		 * page ring; hdr_len is the linear part.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Recycle error frames without passing them up. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Trim the trailing 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they carry a VLAN ethertype
		 * (0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb_checksum_none_assert(skb);
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the hardware. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3220
3221 /* MSI ISR - The only difference between this and the INTx ISR
3222  * is that the MSI interrupt is always serviced.
3223  */
/* MSI interrupt handler: ack and mask the interrupt, then schedule
 * NAPI.  An MSI is always ours, so no "is this our interrupt" check
 * is needed (contrast bnx2_interrupt()).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack the interrupt and mask it (MASK_INT). */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3243
/* One-shot MSI handler: unlike bnx2_msi(), no INT_ACK_CMD write is
 * issued here — presumably one-shot mode masks the interrupt in
 * hardware until re-armed (see bnx2_msi() for the masked variant).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3260
/* INTx interrupt handler (possibly on a shared line).  Returns
 * IRQ_NONE when this device did not raise the interrupt, preserving
 * shared-line semantics; otherwise acks/masks the interrupt and
 * schedules NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing only if NAPI was not
	 * already scheduled.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3299
3300 static inline int
3301 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3302 {
3303         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3304         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3305
3306         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3307             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3308                 return 1;
3309         return 0;
3310 }
3311
3312 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3313                                  STATUS_ATTN_BITS_TIMER_ABORT)
3314
3315 static inline int
3316 bnx2_has_work(struct bnx2_napi *bnapi)
3317 {
3318         struct status_block *sblk = bnapi->status_blk.msi;
3319
3320         if (bnx2_has_fast_work(bnapi))
3321                 return 1;
3322
3323 #ifdef BCM_CNIC
3324         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3325                 return 1;
3326 #endif
3327
3328         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3329             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3330                 return 1;
3331
3332         return 0;
3333 }
3334
/* Workaround for missed MSI interrupts, called from the driver timer:
 * if work is pending but the status index has not moved since the last
 * tick, the MSI was presumably lost.  Pulsing the MSI enable bit and
 * invoking the handler by hand unsticks the NAPI instance.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off then on, then run the
			 * handler directly to kick NAPI.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index so the next tick can detect a stall. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3356
#ifdef BCM_CNIC
/* Forward status-block events to the registered CNIC handler, if any.
 * cnic_ops is RCU-protected; cnic_tag records the status index the
 * handler has consumed up to (compared in bnx2_has_work()).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3373
/* Handle link-state attention events signalled through the status
 * block.  An event is pending while the attention bits and their ack
 * copy disagree for any bit in STATUS_ATTN_EVENTS.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}
3393
3394 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3395                           int work_done, int budget)
3396 {
3397         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3398         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3399
3400         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3401                 bnx2_tx_int(bp, bnapi, 0);
3402
3403         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3404                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3405
3406         return work_done;
3407 }
3408
/* NAPI poll handler for MSI-X vectors.  Handles only rx/tx fast-path
 * work; link and CNIC events are handled by the base vector's
 * bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Re-enable this vector's interrupt and tell the
			 * chip how far we have processed.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3435
/* NAPI poll handler for the base (INTx/MSI or MSI-X vector 0) ring.
 * In addition to the rx/tx fast path it services link attention and
 * CNIC events.  Loops until the budget is exhausted or no work is
 * left, then re-enables interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: write twice — first with the mask bit to
			 * ack, then without it to re-enable the line.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3484
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the receive filtering (promiscuous / multicast hash /
 * unicast perfect-match) from the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * keep-VLAN bits cleared; both are re-derived below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Leave VLAN tags in the frame when hw rx accel is disabled. */
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 filter bits: the
		 * low CRC byte's top 3 bits pick the register, the low
		 * 5 bits pick the bit within it.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Fall back to promiscuous when there are more unicast
	 * addresses than perfect-match filter slots.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the USER0 sort filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3573
3574 static int __devinit
3575 check_fw_section(const struct firmware *fw,
3576                  const struct bnx2_fw_file_section *section,
3577                  u32 alignment, bool non_empty)
3578 {
3579         u32 offset = be32_to_cpu(section->offset);
3580         u32 len = be32_to_cpu(section->len);
3581
3582         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3583                 return -EINVAL;
3584         if ((non_empty && len == 0) || len > fw->size - offset ||
3585             len & (alignment - 1))
3586                 return -EINVAL;
3587         return 0;
3588 }
3589
3590 static int __devinit
3591 check_mips_fw_entry(const struct firmware *fw,
3592                     const struct bnx2_mips_fw_file_entry *entry)
3593 {
3594         if (check_fw_section(fw, &entry->text, 4, true) ||
3595             check_fw_section(fw, &entry->data, 4, false) ||
3596             check_fw_section(fw, &entry->rodata, 4, false))
3597                 return -EINVAL;
3598         return 0;
3599 }
3600
3601 static int __devinit
3602 bnx2_request_firmware(struct bnx2 *bp)
3603 {
3604         const char *mips_fw_file, *rv2p_fw_file;
3605         const struct bnx2_mips_fw_file *mips_fw;
3606         const struct bnx2_rv2p_fw_file *rv2p_fw;
3607         int rc;
3608
3609         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3610                 mips_fw_file = FW_MIPS_FILE_09;
3611                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3612                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3613                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3614                 else
3615                         rv2p_fw_file = FW_RV2P_FILE_09;
3616         } else {
3617                 mips_fw_file = FW_MIPS_FILE_06;
3618                 rv2p_fw_file = FW_RV2P_FILE_06;
3619         }
3620
3621         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3622         if (rc) {
3623                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3624                 return rc;
3625         }
3626
3627         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3628         if (rc) {
3629                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3630                 return rc;
3631         }
3632         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3633         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3634         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3635             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3636             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3637             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3638             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3639             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3640                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3641                 return -EINVAL;
3642         }
3643         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3644             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3645             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3646                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3647                 return -EINVAL;
3648         }
3649
3650         return 0;
3651 }
3652
3653 static u32
3654 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3655 {
3656         switch (idx) {
3657         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3658                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3659                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3660                 break;
3661         }
3662         return rv2p_code;
3663 }
3664
/* Download one RV2P processor image from the firmware file and leave
 * the processor in reset (it is un-stalled later).  The image is
 * streamed 64 bits at a time through the INSTR_HIGH/INSTR_LOW window,
 * then up to 8 fixup locations are patched in place.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own address/command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image, one 8-byte instruction per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply fixups: rewrite the instruction at each valid fixup
	 * location after passing it through rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3724
3725 static int
3726 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3727             const struct bnx2_mips_fw_file_entry *fw_entry)
3728 {
3729         u32 addr, len, file_offset;
3730         __be32 *data;
3731         u32 offset;
3732         u32 val;
3733
3734         /* Halt the CPU. */
3735         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3736         val |= cpu_reg->mode_value_halt;
3737         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3738         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3739
3740         /* Load the Text area. */
3741         addr = be32_to_cpu(fw_entry->text.addr);
3742         len = be32_to_cpu(fw_entry->text.len);
3743         file_offset = be32_to_cpu(fw_entry->text.offset);
3744         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3745
3746         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3747         if (len) {
3748                 int j;
3749
3750                 for (j = 0; j < (len / 4); j++, offset += 4)
3751                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3752         }
3753
3754         /* Load the Data area. */
3755         addr = be32_to_cpu(fw_entry->data.addr);
3756         len = be32_to_cpu(fw_entry->data.len);
3757         file_offset = be32_to_cpu(fw_entry->data.offset);
3758         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3759
3760         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3761         if (len) {
3762                 int j;
3763
3764                 for (j = 0; j < (len / 4); j++, offset += 4)
3765                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3766         }
3767
3768         /* Load the Read-Only area. */
3769         addr = be32_to_cpu(fw_entry->rodata.addr);
3770         len = be32_to_cpu(fw_entry->rodata.len);
3771         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3772         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3773
3774         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3775         if (len) {
3776                 int j;
3777
3778                 for (j = 0; j < (len / 4); j++, offset += 4)
3779                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3780         }
3781
3782         /* Clear the pre-fetch instruction. */
3783         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3784
3785         val = be32_to_cpu(fw_entry->start_addr);
3786         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3787
3788         /* Start the CPU. */
3789         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3790         val &= ~cpu_reg->mode_value_halt;
3791         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3792         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3793
3794         return 0;
3795 }
3796
3797 static int
3798 bnx2_init_cpus(struct bnx2 *bp)
3799 {
3800         const struct bnx2_mips_fw_file *mips_fw =
3801                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3802         const struct bnx2_rv2p_fw_file *rv2p_fw =
3803                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3804         int rc;
3805
3806         /* Initialize the RV2P processor. */
3807         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3808         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3809
3810         /* Initialize the RX Processor. */
3811         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3812         if (rc)
3813                 goto init_cpu_err;
3814
3815         /* Initialize the TX Processor. */
3816         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3817         if (rc)
3818                 goto init_cpu_err;
3819
3820         /* Initialize the TX Patch-up Processor. */
3821         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3822         if (rc)
3823                 goto init_cpu_err;
3824
3825         /* Initialize the Completion Processor. */
3826         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3827         if (rc)
3828                 goto init_cpu_err;
3829
3830         /* Initialize the Command Processor. */
3831         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3832
3833 init_cpu_err:
3834         return rc;
3835 }
3836
/* Transition the device between PCI power states.  Only D0 (full
 * power) and D3hot (suspend, optionally with Wake-on-LAN armed) are
 * supported; any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any magic/ACPI packet indications and leave
		 * magic-packet receive mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* On copper, advertise only 10/100 while
			 * suspended; original settings are restored
			 * after the PHY setup below.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Disable, program, then re-enable sort user0. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 only enter D3hot when WOL is
			 * armed; otherwise the state field stays D0.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3974
3975 static int
3976 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3977 {
3978         u32 val;
3979         int j;
3980
3981         /* Request access to the flash interface. */
3982         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3983         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3984                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3985                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3986                         break;
3987
3988                 udelay(5);
3989         }
3990
3991         if (j >= NVRAM_TIMEOUT_COUNT)
3992                 return -EBUSY;
3993
3994         return 0;
3995 }
3996
3997 static int
3998 bnx2_release_nvram_lock(struct bnx2 *bp)
3999 {
4000         int j;
4001         u32 val;
4002
4003         /* Relinquish nvram interface. */
4004         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4005
4006         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4007                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4008                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4009                         break;
4010
4011                 udelay(5);
4012         }
4013
4014         if (j >= NVRAM_TIMEOUT_COUNT)
4015                 return -EBUSY;
4016
4017         return 0;
4018 }
4019
4020
/* Allow writes to the flash.  Sets the PCI-side write-enable, and for
 * parts flagged BNX2_NV_WREN additionally issues a WREN command and
 * polls for completion.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4049
4050 static void
4051 bnx2_disable_nvram_write(struct bnx2 *bp)
4052 {
4053         u32 val;
4054
4055         val = REG_RD(bp, BNX2_MISC_CFG);
4056         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4057 }
4058
4059
4060 static void
4061 bnx2_enable_nvram_access(struct bnx2 *bp)
4062 {
4063         u32 val;
4064
4065         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4066         /* Enable both bits, even on read. */
4067         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4068                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4069 }
4070
4071 static void
4072 bnx2_disable_nvram_access(struct bnx2 *bp)
4073 {
4074         u32 val;
4075
4076         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4077         /* Disable both bits, even after read. */
4078         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4079                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4080                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4081 }
4082
/* Erase the flash page containing @offset.  A no-op for buffered
 * flash parts.  Returns 0 on success or -EBUSY when the command does
 * not complete within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4122
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored in
 * big-endian byte order via memcpy, so ret_val need not be aligned).
 * @cmd_flags carries extra NVM_COMMAND bits (e.g. first/last flags).
 * Returns 0 on success or -EBUSY when the command does not complete
 * within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4166
4167
4168 static int
4169 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4170 {
4171         u32 cmd;
4172         __be32 val32;
4173         int j;
4174
4175         /* Build the command word. */
4176         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4177
4178         /* Calculate an offset of a buffered flash, not needed for 5709. */
4179         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4180                 offset = ((offset / bp->flash_info->page_size) <<
4181                           bp->flash_info->page_bits) +
4182                          (offset % bp->flash_info->page_size);
4183         }
4184
4185         /* Need to clear DONE bit separately. */
4186         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4187
4188         memcpy(&val32, val, 4);
4189
4190         /* Write the data. */
4191         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4192
4193         /* Address of the NVRAM to write to. */
4194         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4195
4196         /* Issue the write command. */
4197         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4198
4199         /* Wait for completion. */
4200         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4201                 udelay(5);
4202
4203                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4204                         break;
4205         }
4206         if (j >= NVRAM_TIMEOUT_COUNT)
4207                 return -EBUSY;
4208
4209         return 0;
4210 }
4211
/* Identify the attached flash/EEPROM part and record its geometry.
 *
 * 5709 has a fixed flash interface, so the table lookup is skipped.
 * Otherwise NVM_CFG1 is matched against flash_table[]: either by the
 * already-programmed config1 value (interface previously
 * reconfigured) or by the strapping pins, in which case the matched
 * part's configuration is programmed into the interface here.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* NOTE(review): bit 30 of NVM_CFG1 presumably marks the
	 * interface as already reconfigured -- from the branch
	 * comments; no symbolic name is defined for it. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j == entry_count means neither loop above found a match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared memory; fall back to the
	 * table entry's total size when the field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4294
4295 static int
4296 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4297                 int buf_size)
4298 {
4299         int rc = 0;
4300         u32 cmd_flags, offset32, len32, extra;
4301
4302         if (buf_size == 0)
4303                 return 0;
4304
4305         /* Request access to the flash interface. */
4306         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4307                 return rc;
4308
4309         /* Enable access to flash interface */
4310         bnx2_enable_nvram_access(bp);
4311
4312         len32 = buf_size;
4313         offset32 = offset;
4314         extra = 0;
4315
4316         cmd_flags = 0;
4317
4318         if (offset32 & 3) {
4319                 u8 buf[4];
4320                 u32 pre_len;
4321
4322                 offset32 &= ~3;
4323                 pre_len = 4 - (offset & 3);
4324
4325                 if (pre_len >= len32) {
4326                         pre_len = len32;
4327                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4328                                     BNX2_NVM_COMMAND_LAST;
4329                 }
4330                 else {
4331                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4332                 }
4333
4334                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4335
4336                 if (rc)
4337                         return rc;
4338
4339                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4340
4341                 offset32 += 4;
4342                 ret_buf += pre_len;
4343                 len32 -= pre_len;
4344         }
4345         if (len32 & 3) {
4346                 extra = 4 - (len32 & 3);
4347                 len32 = (len32 + 4) & ~3;
4348         }
4349
4350         if (len32 == 4) {
4351                 u8 buf[4];
4352
4353                 if (cmd_flags)
4354                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4355                 else
4356                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4357                                     BNX2_NVM_COMMAND_LAST;
4358
4359                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4360
4361                 memcpy(ret_buf, buf, 4 - extra);
4362         }
4363         else if (len32 > 0) {
4364                 u8 buf[4];
4365
4366                 /* Read the first word. */
4367                 if (cmd_flags)
4368                         cmd_flags = 0;
4369                 else
4370                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4371
4372                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4373
4374                 /* Advance to the next dword. */
4375                 offset32 += 4;
4376                 ret_buf += 4;
4377                 len32 -= 4;
4378
4379                 while (len32 > 4 && rc == 0) {
4380                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4381
4382                         /* Advance to the next dword. */
4383                         offset32 += 4;
4384                         ret_buf += 4;
4385                         len32 -= 4;
4386                 }
4387
4388                 if (rc)
4389                         return rc;
4390
4391                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4392                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4393
4394                 memcpy(ret_buf, buf, 4 - extra);
4395         }
4396
4397         /* Disable access to flash interface */
4398         bnx2_disable_nvram_access(bp);
4399
4400         bnx2_release_nvram_lock(bp);
4401
4402         return rc;
4403 }
4404
/* Write buf_size bytes from data_buf into NVRAM starting at 'offset'.
 *
 * Writes happen a dword at a time, so unaligned head/tail bytes are
 * merged with the existing flash contents: the bordering dwords are
 * read into 'start'/'end' and combined with the caller's data in a
 * temporary 'align_buf'.  Non-buffered flash parts must be rewritten
 * a full page at a time: each affected page is read into
 * 'flash_buffer', erased, and rewritten with the untouched head/tail
 * dwords restored around the new data.  The NVRAM lock is acquired
 * and released once per page.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range down to a dword boundary and
	 * preserve the bytes that precede the caller's data. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen up to a dword boundary and preserve the
	 * bytes that follow the caller's data. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Assemble head + payload + tail into one aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Page staging buffer; 264 bytes covers the largest
		 * page_size in flash_table. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4584
4585 static void
4586 bnx2_init_fw_cap(struct bnx2 *bp)
4587 {
4588         u32 val, sig = 0;
4589
4590         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4591         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4592
4593         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4594                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4595
4596         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4597         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4598                 return;
4599
4600         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4601                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4602                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4603         }
4604
4605         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4606             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4607                 u32 link;
4608
4609                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4610
4611                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4612                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4613                         bp->phy_port = PORT_FIBRE;
4614                 else
4615                         bp->phy_port = PORT_TP;
4616
4617                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4618                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4619         }
4620
4621         if (netif_running(bp->dev) && sig)
4622                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4623 }
4624
/* Map the MSI-X vector table and pending-bit array (which live in
 * chip-internal memory) through separate GRC windows: the window
 * block is switched to separate-window mode, then windows 2 and 3
 * are pointed at the table and PBA respectively.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4633
/* Soft-reset the chip core and wait for the bootcode to come back up.
 *
 * 'reset_code' is the BNX2_DRV_MSG_CODE_* reason passed to the
 * firmware via bnx2_fw_sync() both before (WAIT0) and after (WAIT1)
 * the reset.  5709 resets through BNX2_MISC_COMMAND; older chips use
 * the core-reset bit in PCICFG_MISC_CONFIG, with extra workarounds
 * for 5706 A0/A1.  Afterwards, firmware capabilities are
 * re-negotiated and the MSI-X table windows restored if in use.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if the endian sanity check fails, or an error from bnx2_fw_sync()
 * / bnx2_alloc_bad_rbuf().
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back before the settle delay (presumably to flush the
	 * posted write -- the value itself is unused). */
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		/* Restore register window / word-swap config via PCI
		 * config space, since memory-mapped state was reset. */
		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-negotiate firmware capabilities; follow a remote-PHY port
	 * change if one happened across the reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4743
/* Bring the chip up after reset: program DMA configuration, start the
 * on-chip CPUs and context memory, set the MAC address, MTU and rx
 * buffer sizes, program the status/statistics block DMA addresses and
 * host-coalescing parameters (including per-MSI-X-vector status
 * blocks), then complete the WAIT2/RESET handshake with the firmware
 * and enable the default engine blocks.
 *
 * Returns 0 on success or a negative errno from the context/CPU init
 * steps or the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): unnamed DMA_CONFIG tuning bits carried over
	 * from the vendor driver; meaning not documented here. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the enable-relaxed-ordering bit. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the tx backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* rbuf sizing always assumes at least the standard MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the host status/statistics block and per-vector
	 * bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: interrupt value in the upper
	 * half-word, non-interrupt value in the lower. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Hint the firmware about latency expectations. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Per-vector status block config for the additional MSI-X
	 * vectors (vector 0 was configured above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for later COAL_NOW kicks. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4973
4974 static void
4975 bnx2_clear_ring_states(struct bnx2 *bp)
4976 {
4977         struct bnx2_napi *bnapi;
4978         struct bnx2_tx_ring_info *txr;
4979         struct bnx2_rx_ring_info *rxr;
4980         int i;
4981
4982         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4983                 bnapi = &bp->bnx2_napi[i];
4984                 txr = &bnapi->tx_ring;
4985                 rxr = &bnapi->rx_ring;
4986
4987                 txr->tx_cons = 0;
4988                 txr->hw_tx_cons = 0;
4989                 rxr->rx_prod_bseq = 0;
4990                 rxr->rx_prod = 0;
4991                 rxr->rx_cons = 0;
4992                 rxr->rx_pg_prod = 0;
4993                 rxr->rx_pg_cons = 0;
4994         }
4995 }
4996
4997 static void
4998 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4999 {
5000         u32 val, offset0, offset1, offset2, offset3;
5001         u32 cid_addr = GET_CID_ADDR(cid);
5002
5003         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5004                 offset0 = BNX2_L2CTX_TYPE_XI;
5005                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5006                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5007                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5008         } else {
5009                 offset0 = BNX2_L2CTX_TYPE;
5010                 offset1 = BNX2_L2CTX_CMD_TYPE;
5011                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5012                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5013         }
5014         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5015         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5016
5017         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5018         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5019
5020         val = (u64) txr->tx_desc_mapping >> 32;
5021         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5022
5023         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5024         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5025 }
5026
5027 static void
5028 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5029 {
5030         struct tx_bd *txbd;
5031         u32 cid = TX_CID;
5032         struct bnx2_napi *bnapi;
5033         struct bnx2_tx_ring_info *txr;
5034
5035         bnapi = &bp->bnx2_napi[ring_num];
5036         txr = &bnapi->tx_ring;
5037
5038         if (ring_num == 0)
5039                 cid = TX_CID;
5040         else
5041                 cid = TX_TSS_CID + ring_num - 1;
5042
5043         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5044
5045         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5046
5047         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5048         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5049
5050         txr->tx_prod = 0;
5051         txr->tx_prod_bseq = 0;
5052
5053         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5054         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5055
5056         bnx2_init_tx_context(bp, cid, txr);
5057 }
5058
5059 static void
5060 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5061                      int num_rings)
5062 {
5063         int i;
5064         struct rx_bd *rxbd;
5065
5066         for (i = 0; i < num_rings; i++) {
5067                 int j;
5068
5069                 rxbd = &rx_ring[i][0];
5070                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5071                         rxbd->rx_bd_len = buf_size;
5072                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5073                 }
5074                 if (i == (num_rings - 1))
5075                         j = 0;
5076                 else
5077                         j = i + 1;
5078                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5079                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5080         }
5081 }
5082
/* Set up one rx ring: program its L2 context with the BD ring (and
 * optional page ring) DMA addresses, then pre-fill the ring with rx
 * buffers.  The order of the context/register writes below follows the
 * hardware programming sequence and should not be rearranged.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX context; RSS rings use RX_RSS_CID on. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default to no page ring; overwritten below if jumbo pages are used. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the secondary page-based BD ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Tell the chip where the first page of the normal BD ring lives. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-allocate rx pages; a partial fill is logged but tolerated. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-allocate rx skbs, same best-effort policy as above. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used on the fast path to publish new buffers. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Hand the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5168
/* Initialize every tx and rx ring and, when multiple rx rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the rx rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table: 8 entries of 4 bits are
		 * packed per 32-bit word and written out every 8th entry.
		 * Ring indices are distributed round-robin over the
		 * non-default rx rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Hash on all supported IPv4/IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5215
5216 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5217 {
5218         u32 max, num_rings = 1;
5219
5220         while (ring_size > MAX_RX_DESC_CNT) {
5221                 ring_size -= MAX_RX_DESC_CNT;
5222                 num_rings++;
5223         }
5224         /* round to next power of 2 */
5225         max = max_size;
5226         while ((max & num_rings) == 0)
5227                 max >>= 1;
5228
5229         if (num_rings != max)
5230                 max <<= 1;
5231
5232         return max;
5233 }
5234
/* Compute all rx buffer/ring sizing derived from the current MTU and
 * the requested ring size, including the jumbo page-ring parameters
 * when a frame no longer fits in a single page.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full skb footprint: aligned data area plus skb overhead. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Frame spills past one page: split rx into a small header
		 * buffer plus page-sized buffers on the jumbo page ring.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5273
/* Release every skb still queued on the tx rings, unmapping the DMA
 * for the head segment and each page fragment.  Used when tearing the
 * rings down.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop body: by 1 for an empty slot,
		 * or by 1 + nr_frags for a queued skb.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Unmap the linear head portion of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Each fragment occupies its own BD slot after the
			 * head; unmap them in order.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5317
5318 static void
5319 bnx2_free_rx_skbs(struct bnx2 *bp)
5320 {
5321         int i;
5322
5323         for (i = 0; i < bp->num_rx_rings; i++) {
5324                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5325                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5326                 int j;
5327
5328                 if (rxr->rx_buf_ring == NULL)
5329                         return;
5330
5331                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5332                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5333                         struct sk_buff *skb = rx_buf->skb;
5334
5335                         if (skb == NULL)
5336                                 continue;
5337
5338                         dma_unmap_single(&bp->pdev->dev,
5339                                          dma_unmap_addr(rx_buf, mapping),
5340                                          bp->rx_buf_use_size,
5341                                          PCI_DMA_FROMDEVICE);
5342
5343                         rx_buf->skb = NULL;
5344
5345                         dev_kfree_skb(skb);
5346                 }
5347                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5348                         bnx2_free_rx_page(bp, rxr, j);
5349         }
5350 }
5351
/* Free all tx and rx buffers held by the driver's rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5358
5359 static int
5360 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5361 {
5362         int rc;
5363
5364         rc = bnx2_reset_chip(bp, reset_code);
5365         bnx2_free_skbs(bp);
5366         if (rc)
5367                 return rc;
5368
5369         if ((rc = bnx2_init_chip(bp)) != 0)
5370                 return rc;
5371
5372         bnx2_init_all_rings(bp);
5373         return 0;
5374 }
5375
/* Full NIC (re)initialization: reset the chip and rings, then bring
 * the PHY and link state up under phy_lock.  Returns 0 or the error
 * from bnx2_reset_nic().
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY access and link state are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5392
5393 static int
5394 bnx2_shutdown_chip(struct bnx2 *bp)
5395 {
5396         u32 reset_code;
5397
5398         if (bp->flags & BNX2_FLAG_NO_WOL)
5399                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5400         else if (bp->wol)
5401                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5402         else
5403                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5404
5405         return bnx2_reset_chip(bp, reset_code);
5406 }
5407
/* Ethtool self-test: verify selected chip registers by writing 0 and
 * all-ones and checking that read/write bits change and read-only
 * bits hold their value.  Returns 0 on success, -ENODEV on the first
 * mismatch.  Entries flagged BNX2_FL_NOT_5709 are skipped on 5709.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table of { offset, flags, writable-bit mask, read-only-bit mask }. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must all set, ro bits unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5578
5579 static int
5580 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5581 {
5582         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5583                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5584         int i;
5585
5586         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5587                 u32 offset;
5588
5589                 for (offset = 0; offset < size; offset += 4) {
5590
5591                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5592
5593                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5594                                 test_pattern[i]) {
5595                                 return -ENODEV;
5596                         }
5597                 }
5598         }
5599         return 0;
5600 }
5601
5602 static int
5603 bnx2_test_memory(struct bnx2 *bp)
5604 {
5605         int ret = 0;
5606         int i;
5607         static struct mem_entry {
5608                 u32   offset;
5609                 u32   len;
5610         } mem_tbl_5706[] = {
5611                 { 0x60000,  0x4000 },
5612                 { 0xa0000,  0x3000 },
5613                 { 0xe0000,  0x4000 },
5614                 { 0x120000, 0x4000 },
5615                 { 0x1a0000, 0x4000 },
5616                 { 0x160000, 0x4000 },
5617                 { 0xffffffff, 0    },
5618         },
5619         mem_tbl_5709[] = {
5620                 { 0x60000,  0x4000 },
5621                 { 0xa0000,  0x3000 },
5622                 { 0xe0000,  0x4000 },
5623                 { 0x120000, 0x4000 },
5624                 { 0x1a0000, 0x4000 },
5625                 { 0xffffffff, 0    },
5626         };
5627         struct mem_entry *mem_tbl;
5628
5629         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5630                 mem_tbl = mem_tbl_5709;
5631         else
5632                 mem_tbl = mem_tbl_5706;
5633
5634         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5635                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5636                         mem_tbl[i].len)) != 0) {
5637                         return ret;
5638                 }
5639         }
5640
5641         return ret;
5642 }
5643
5644 #define BNX2_MAC_LOOPBACK       0
5645 #define BNX2_PHY_LOOPBACK       1
5646
/* Ethtool loopback self-test: transmit one generated frame in MAC or
 * PHY loopback mode and verify it comes back on the rx ring intact.
 * Returns 0 on success, -ENODEV if the frame is lost or corrupted,
 * -EINVAL for an unknown mode, -ENOMEM/-EIO on allocation/mapping
 * failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): txr/rxr are re-assigned here to the same values
	 * as their initializers above; the duplication is harmless.
	 */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: dest MAC = own address, zeroed src/type,
	 * then an incrementing byte pattern that is checked on receive.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Fill one tx BD describing the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have completed on tx ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have arrived on rx. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	/* Make the device-written rx buffer visible to the CPU. */
	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length excludes the 4-byte CRC appended by the MAC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5778
5779 #define BNX2_MAC_LOOPBACK_FAILED        1
5780 #define BNX2_PHY_LOOPBACK_FAILED        2
5781 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5782                                          BNX2_PHY_LOOPBACK_FAILED)
5783
5784 static int
5785 bnx2_test_loopback(struct bnx2 *bp)
5786 {
5787         int rc = 0;
5788
5789         if (!netif_running(bp->dev))
5790                 return BNX2_LOOPBACK_FAILED;
5791
5792         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5793         spin_lock_bh(&bp->phy_lock);
5794         bnx2_init_phy(bp, 1);
5795         spin_unlock_bh(&bp->phy_lock);
5796         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5797                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5798         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5799                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5800         return rc;
5801 }
5802
5803 #define NVRAM_SIZE 0x200
5804 #define CRC32_RESIDUAL 0xdebb20e3
5805
5806 static int
5807 bnx2_test_nvram(struct bnx2 *bp)
5808 {
5809         __be32 buf[NVRAM_SIZE / 4];
5810         u8 *data = (u8 *) buf;
5811         int rc = 0;
5812         u32 magic, csum;
5813
5814         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5815                 goto test_nvram_done;
5816
5817         magic = be32_to_cpu(buf[0]);
5818         if (magic != 0x669955aa) {
5819                 rc = -ENODEV;
5820                 goto test_nvram_done;
5821         }
5822
5823         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5824                 goto test_nvram_done;
5825
5826         csum = ether_crc_le(0x100, data);
5827         if (csum != CRC32_RESIDUAL) {
5828                 rc = -ENODEV;
5829                 goto test_nvram_done;
5830         }
5831
5832         csum = ether_crc_le(0x100, data + 0x100);
5833         if (csum != CRC32_RESIDUAL) {
5834                 rc = -ENODEV;
5835         }
5836
5837 test_nvram_done:
5838         return rc;
5839 }
5840
5841 static int
5842 bnx2_test_link(struct bnx2 *bp)
5843 {
5844         u32 bmsr;
5845
5846         if (!netif_running(bp->dev))
5847                 return -ENODEV;
5848
5849         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5850                 if (bp->link_up)
5851                         return 0;
5852                 return -ENODEV;
5853         }
5854         spin_lock_bh(&bp->phy_lock);
5855         bnx2_enable_bmsr1(bp);
5856         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5857         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5858         bnx2_disable_bmsr1(bp);
5859         spin_unlock_bh(&bp->phy_lock);
5860
5861         if (bmsr & BMSR_LSTATUS) {
5862                 return 0;
5863         }
5864         return -ENODEV;
5865 }
5866
5867 static int
5868 bnx2_test_intr(struct bnx2 *bp)
5869 {
5870         int i;
5871         u16 status_idx;
5872
5873         if (!netif_running(bp->dev))
5874                 return -ENODEV;
5875
5876         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5877
5878         /* This register is not touched during run-time. */
5879         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5880         REG_RD(bp, BNX2_HC_COMMAND);
5881
5882         for (i = 0; i < 10; i++) {
5883                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5884                         status_idx) {
5885
5886                         break;
5887                 }
5888
5889                 msleep_interruptible(10);
5890         }
5891         if (i < 10)
5892                 return 0;
5893
5894         return -ENODEV;
5895 }
5896
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detect means no cable/partner at all. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice: the first read returns latched status bits. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5928
/* Per-tick serdes link maintenance for the 5706 (called from
 * bnx2_timer()).  Handles parallel detection: when the partner does
 * not autonegotiate, force 1G full duplex; when it starts to
 * autoneg again, switch back.  Then re-checks link sync state.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg was recently restarted; skip probing for now. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				/* Partner is up but not autonegotiating:
				 * force 1G full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner now autonegs; re-enable autoneg and
			 * leave parallel-detect mode.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Read twice: the first read returns latched status. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but no sync: force it down
			 * once, then let bnx2_set_link() re-evaluate.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5990
/* Per-tick serdes link maintenance for the 5708 (called from
 * bnx2_timer()).  While the link is down with autoneg enabled,
 * alternate between forced 2.5G and normal autoneg so a partner
 * supporting only one of the two can be found.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a restarted autoneg time before switching again. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Currently autonegotiating: try forced 2.5G for
			 * one shorter interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Currently forced: go back to autoneg and skip
			 * the next two ticks to let it complete.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6023
/* Periodic driver timer (bp->timer).  Sends the firmware heartbeat,
 * refreshes the firmware rx-drop counter, applies the broken-stats
 * workaround, and runs serdes link maintenance.  Always re-arms
 * itself at bp->current_interval while the device is running.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are currently held off (intr_sem raised);
	 * do nothing this tick but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain MSI (without one-shot mode) can miss events; check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6059
6060 static int
6061 bnx2_request_irq(struct bnx2 *bp)
6062 {
6063         unsigned long flags;
6064         struct bnx2_irq *irq;
6065         int rc = 0, i;
6066
6067         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6068                 flags = 0;
6069         else
6070                 flags = IRQF_SHARED;
6071
6072         for (i = 0; i < bp->irq_nvecs; i++) {
6073                 irq = &bp->irq_tbl[i];
6074                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6075                                  &bp->bnx2_napi[i]);
6076                 if (rc)
6077                         break;
6078                 irq->requested = 1;
6079         }
6080         return rc;
6081 }
6082
6083 static void
6084 bnx2_free_irq(struct bnx2 *bp)
6085 {
6086         struct bnx2_irq *irq;
6087         int i;
6088
6089         for (i = 0; i < bp->irq_nvecs; i++) {
6090                 irq = &bp->irq_tbl[i];
6091                 if (irq->requested)
6092                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6093                 irq->requested = 0;
6094         }
6095         if (bp->flags & BNX2_FLAG_USING_MSI)
6096                 pci_disable_msi(bp->pdev);
6097         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6098                 pci_disable_msix(bp->pdev);
6099
6100         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6101 }
6102
6103 static void
6104 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6105 {
6106         int i, total_vecs, rc;
6107         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6108         struct net_device *dev = bp->dev;
6109         const int len = sizeof(bp->irq_tbl[0].name);
6110
6111         bnx2_setup_msix_tbl(bp);
6112         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6113         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6114         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6115
6116         /*  Need to flush the previous three writes to ensure MSI-X
6117          *  is setup properly */
6118         REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6119
6120         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6121                 msix_ent[i].entry = i;
6122                 msix_ent[i].vector = 0;
6123         }
6124
6125         total_vecs = msix_vecs;
6126 #ifdef BCM_CNIC
6127         total_vecs++;
6128 #endif
6129         rc = -ENOSPC;
6130         while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6131                 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6132                 if (rc <= 0)
6133                         break;
6134                 if (rc > 0)
6135                         total_vecs = rc;
6136         }
6137
6138         if (rc != 0)
6139                 return;
6140
6141         msix_vecs = total_vecs;
6142 #ifdef BCM_CNIC
6143         msix_vecs--;
6144 #endif
6145         bp->irq_nvecs = msix_vecs;
6146         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6147         for (i = 0; i < total_vecs; i++) {
6148                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6149                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6150                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6151         }
6152 }
6153
6154 static int
6155 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6156 {
6157         int cpus = num_online_cpus();
6158         int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6159
6160         bp->irq_tbl[0].handler = bnx2_interrupt;
6161         strcpy(bp->irq_tbl[0].name, bp->dev->name);
6162         bp->irq_nvecs = 1;
6163         bp->irq_tbl[0].vector = bp->pdev->irq;
6164
6165         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6166                 bnx2_enable_msix(bp, msix_vecs);
6167
6168         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6169             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6170                 if (pci_enable_msi(bp->pdev) == 0) {
6171                         bp->flags |= BNX2_FLAG_USING_MSI;
6172                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6173                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6174                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6175                         } else
6176                                 bp->irq_tbl[0].handler = bnx2_msi;
6177
6178                         bp->irq_tbl[0].vector = bp->pdev->irq;
6179                 }
6180         }
6181
6182         bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6183         netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6184
6185         bp->num_rx_rings = bp->irq_nvecs;
6186         return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6187 }
6188
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Pick MSI-X/MSI/INTx and size the rings before allocating. */
	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	/* A fresh open starts with clean accumulated statistics. */
	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force legacy INTx and redo the NIC init. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind the resources acquired above. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	return rc;
}
6267
6268 static void
6269 bnx2_reset_task(struct work_struct *work)
6270 {
6271         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6272
6273         rtnl_lock();
6274         if (!netif_running(bp->dev)) {
6275                 rtnl_unlock();
6276                 return;
6277         }
6278
6279         bnx2_netif_stop(bp, true);
6280
6281         bnx2_init_nic(bp, 1);
6282
6283         atomic_set(&bp->intr_sem, 1);
6284         bnx2_netif_start(bp, true);
6285         rtnl_unlock();
6286 }
6287
/* Dump key PCI and chip state to the kernel log for debugging a hung
 * device (called from bnx2_tx_timeout()).
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1, val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	/* The MCP state register addresses differ on 5709 vs older chips. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6320
/* Transmit watchdog timeout handler: log diagnostic state, then
 * defer a full chip reset to process context via reset_task.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6331
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Queue one skb for transmission: build a chain of tx BDs (one for
 * the linear data plus one per page fragment), encode checksum/VLAN/
 * LSO flags, then write the producer index and byte count to the
 * doorbell registers.  On a fragment DMA-mapping failure, all
 * mappings made so far are undone and the skb is dropped
 * (NETDEV_TX_OK), never requeued.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The queue should have been stopped before the ring got this
	 * full; reaching here indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	/* LSO: encode MSS and header-length information into the BD. */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6: spread the TCP-header offset (beyond the
			 * fixed IPv6 header) across the BD flag and MSS
			 * fields as the hardware expects.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4: header words beyond the minimum headers
			 * (IP options + TCP options) go into bits 8+.
			 */
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part of the skb for DMA. */
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
				       len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD in the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and cumulative bytes. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6504
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure a pending reset_task cannot run concurrently with
	 * the teardown below.
	 */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Leave the chip in a low-power state until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6526
6527 static void
6528 bnx2_save_stats(struct bnx2 *bp)
6529 {
6530         u32 *hw_stats = (u32 *) bp->stats_blk;
6531         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6532         int i;
6533
6534         /* The 1st 10 counters are 64-bit counters */
6535         for (i = 0; i < 20; i += 2) {
6536                 u32 hi;
6537                 u64 lo;
6538
6539                 hi = temp_stats[i] + hw_stats[i];
6540                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6541                 if (lo > 0xffffffff)
6542                         hi++;
6543                 temp_stats[i] = hi;
6544                 temp_stats[i + 1] = lo & 0xffffffff;
6545         }
6546
6547         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6548                 temp_stats[i] += hw_stats[i];
6549 }
6550
/* Combine a hi/lo u32 counter pair into a single u64 value. */
#define GET_64BIT_NET_STATS64(ctr)              \
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum the live hardware counter with the copy saved in temp_stats_blk
 * (expects a local `bp` in scope).
 */
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Same summation for 32-bit counters, cast to unsigned long. */
#define GET_32BIT_NET_STATS(ctr)                                \
	(unsigned long) (bp->stats_blk->ctr +                   \
			 bp->temp_stats_blk->ctr)
6561
/* ndo_get_stats64: aggregate the live hardware statistics block plus
 * the counters saved in temp_stats_blk (via the GET_*_NET_STATS
 * macros) into net_stats.  Returns net_stats untouched if the stats
 * block has not been allocated yet.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0.
	 * NOTE(review): presumably a hardware counter erratum on those
	 * chips — confirm against the errata documents.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6634
6635 /* All ethtool functions called with rtnl_lock */
6636
6637 static int
6638 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6639 {
6640         struct bnx2 *bp = netdev_priv(dev);
6641         int support_serdes = 0, support_copper = 0;
6642
6643         cmd->supported = SUPPORTED_Autoneg;
6644         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6645                 support_serdes = 1;
6646                 support_copper = 1;
6647         } else if (bp->phy_port == PORT_FIBRE)
6648                 support_serdes = 1;
6649         else
6650                 support_copper = 1;
6651
6652         if (support_serdes) {
6653                 cmd->supported |= SUPPORTED_1000baseT_Full |
6654                         SUPPORTED_FIBRE;
6655                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6656                         cmd->supported |= SUPPORTED_2500baseX_Full;
6657
6658         }
6659         if (support_copper) {
6660                 cmd->supported |= SUPPORTED_10baseT_Half |
6661                         SUPPORTED_10baseT_Full |
6662                         SUPPORTED_100baseT_Half |
6663                         SUPPORTED_100baseT_Full |
6664                         SUPPORTED_1000baseT_Full |
6665                         SUPPORTED_TP;
6666
6667         }
6668
6669         spin_lock_bh(&bp->phy_lock);
6670         cmd->port = bp->phy_port;
6671         cmd->advertising = bp->advertising;
6672
6673         if (bp->autoneg & AUTONEG_SPEED) {
6674                 cmd->autoneg = AUTONEG_ENABLE;
6675         }
6676         else {
6677                 cmd->autoneg = AUTONEG_DISABLE;
6678         }
6679
6680         if (netif_carrier_ok(dev)) {
6681                 cmd->speed = bp->line_speed;
6682                 cmd->duplex = bp->duplex;
6683         }
6684         else {
6685                 cmd->speed = -1;
6686                 cmd->duplex = -1;
6687         }
6688         spin_unlock_bh(&bp->phy_lock);
6689
6690         cmd->transceiver = XCVR_INTERNAL;
6691         cmd->phy_address = bp->phy_addr;
6692
6693         return 0;
6694 }
6695
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed to *bp until all
	 * validation has passed.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	/* Only twisted-pair and fibre ports are supported. */
	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Selecting a different port than the active one is only allowed
	 * on remote-PHY capable configurations.
	 */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertisement mask to speeds valid for the
		 * selected medium; an empty mask means "advertise all".
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: fibre accepts only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); forcing 1G/2.5G on copper
		 * is rejected.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed; commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6773
6774 static void
6775 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6776 {
6777         struct bnx2 *bp = netdev_priv(dev);
6778
6779         strcpy(info->driver, DRV_MODULE_NAME);
6780         strcpy(info->version, DRV_MODULE_VERSION);
6781         strcpy(info->bus_info, pci_name(bp->pdev));
6782         strcpy(info->fw_version, bp->fw_version);
6783 }
6784
6785 #define BNX2_REGDUMP_LEN                (32 * 1024)
6786
6787 static int
6788 bnx2_get_regs_len(struct net_device *dev)
6789 {
6790         return BNX2_REGDUMP_LEN;
6791 }
6792
6793 static void
6794 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6795 {
6796         u32 *p = _p, i, offset;
6797         u8 *orig_p = _p;
6798         struct bnx2 *bp = netdev_priv(dev);
6799         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6800                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6801                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6802                                  0x1040, 0x1048, 0x1080, 0x10a4,
6803                                  0x1400, 0x1490, 0x1498, 0x14f0,
6804                                  0x1500, 0x155c, 0x1580, 0x15dc,
6805                                  0x1600, 0x1658, 0x1680, 0x16d8,
6806                                  0x1800, 0x1820, 0x1840, 0x1854,
6807                                  0x1880, 0x1894, 0x1900, 0x1984,
6808                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6809                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6810                                  0x2000, 0x2030, 0x23c0, 0x2400,
6811                                  0x2800, 0x2820, 0x2830, 0x2850,
6812                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6813                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6814                                  0x4080, 0x4090, 0x43c0, 0x4458,
6815                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6816                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6817                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6818                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6819                                  0x6800, 0x6848, 0x684c, 0x6860,
6820                                  0x6888, 0x6910, 0x8000 };
6821
6822         regs->version = 0;
6823
6824         memset(p, 0, BNX2_REGDUMP_LEN);
6825
6826         if (!netif_running(bp->dev))
6827                 return;
6828
6829         i = 0;
6830         offset = reg_boundaries[0];
6831         p += offset;
6832         while (offset < BNX2_REGDUMP_LEN) {
6833                 *p++ = REG_RD(bp, offset);
6834                 offset += 4;
6835                 if (offset == reg_boundaries[i + 1]) {
6836                         offset = reg_boundaries[i + 2];
6837                         p = (u32 *) (orig_p + offset);
6838                         i += 2;
6839                 }
6840         }
6841 }
6842
6843 static void
6844 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6845 {
6846         struct bnx2 *bp = netdev_priv(dev);
6847
6848         if (bp->flags & BNX2_FLAG_NO_WOL) {
6849                 wol->supported = 0;
6850                 wol->wolopts = 0;
6851         }
6852         else {
6853                 wol->supported = WAKE_MAGIC;
6854                 if (bp->wol)
6855                         wol->wolopts = WAKE_MAGIC;
6856                 else
6857                         wol->wolopts = 0;
6858         }
6859         memset(&wol->sopass, 0, sizeof(wol->sopass));
6860 }
6861
6862 static int
6863 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6864 {
6865         struct bnx2 *bp = netdev_priv(dev);
6866
6867         if (wol->wolopts & ~WAKE_MAGIC)
6868                 return -EINVAL;
6869
6870         if (wol->wolopts & WAKE_MAGIC) {
6871                 if (bp->flags & BNX2_FLAG_NO_WOL)
6872                         return -EINVAL;
6873
6874                 bp->wol = 1;
6875         }
6876         else {
6877                 bp->wol = 0;
6878         }
6879         return 0;
6880 }
6881
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autonegotiation only makes sense when it is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY configurations delegate the restart to firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; it is re-taken below. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout handling. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6927
6928 static u32
6929 bnx2_get_link(struct net_device *dev)
6930 {
6931         struct bnx2 *bp = netdev_priv(dev);
6932
6933         return bp->link_up;
6934 }
6935
6936 static int
6937 bnx2_get_eeprom_len(struct net_device *dev)
6938 {
6939         struct bnx2 *bp = netdev_priv(dev);
6940
6941         if (bp->flash_info == NULL)
6942                 return 0;
6943
6944         return (int) bp->flash_size;
6945 }
6946
6947 static int
6948 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6949                 u8 *eebuf)
6950 {
6951         struct bnx2 *bp = netdev_priv(dev);
6952         int rc;
6953
6954         if (!netif_running(dev))
6955                 return -EAGAIN;
6956
6957         /* parameters already validated in ethtool_get_eeprom */
6958
6959         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6960
6961         return rc;
6962 }
6963
6964 static int
6965 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6966                 u8 *eebuf)
6967 {
6968         struct bnx2 *bp = netdev_priv(dev);
6969         int rc;
6970
6971         if (!netif_running(dev))
6972                 return -EAGAIN;
6973
6974         /* parameters already validated in ethtool_set_eeprom */
6975
6976         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6977
6978         return rc;
6979 }
6980
6981 static int
6982 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6983 {
6984         struct bnx2 *bp = netdev_priv(dev);
6985
6986         memset(coal, 0, sizeof(struct ethtool_coalesce));
6987
6988         coal->rx_coalesce_usecs = bp->rx_ticks;
6989         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6990         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6991         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6992
6993         coal->tx_coalesce_usecs = bp->tx_ticks;
6994         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6995         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6996         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6997
6998         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6999
7000         return 0;
7001 }
7002
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Each value is truncated to 16 bits first, then clamped to the
	 * width of the hardware field: 10 bits (0x3ff) for tick counts,
	 * 8 bits (0xff) for quick-consumer trip counts.
	 */
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Chips flagged with broken statistics support only "off" or
	 * one-second statistics intervals.
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* A restart is required for the new values to reach the chip. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
7051
7052 static void
7053 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7054 {
7055         struct bnx2 *bp = netdev_priv(dev);
7056
7057         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7058         ering->rx_mini_max_pending = 0;
7059         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7060
7061         ering->rx_pending = bp->rx_ring_size;
7062         ering->rx_mini_pending = 0;
7063         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7064
7065         ering->tx_max_pending = MAX_TX_DESC_CNT;
7066         ering->tx_pending = bp->tx_ring_size;
7067 }
7068
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	/* Resize the rx/tx rings.  On a running interface this requires a
	 * full chip reset and re-initialization with the new sizes.
	 */
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-init failed; re-enable NAPI (stopped above)
			 * so dev_close() can take the device down cleanly.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7108
7109 static int
7110 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7111 {
7112         struct bnx2 *bp = netdev_priv(dev);
7113         int rc;
7114
7115         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7116                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7117                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7118
7119                 return -EINVAL;
7120         }
7121         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7122         return rc;
7123 }
7124
7125 static void
7126 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7127 {
7128         struct bnx2 *bp = netdev_priv(dev);
7129
7130         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7131         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7132         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7133 }
7134
7135 static int
7136 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7137 {
7138         struct bnx2 *bp = netdev_priv(dev);
7139
7140         bp->req_flow_ctrl = 0;
7141         if (epause->rx_pause)
7142                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7143         if (epause->tx_pause)
7144                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7145
7146         if (epause->autoneg) {
7147                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7148         }
7149         else {
7150                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7151         }
7152
7153         if (netif_running(dev)) {
7154                 spin_lock_bh(&bp->phy_lock);
7155                 bnx2_setup_phy(bp, bp->phy_port);
7156                 spin_unlock_bh(&bp->phy_lock);
7157         }
7158
7159         return 0;
7160 }
7161
7162 static u32
7163 bnx2_get_rx_csum(struct net_device *dev)
7164 {
7165         struct bnx2 *bp = netdev_priv(dev);
7166
7167         return bp->rx_csum;
7168 }
7169
7170 static int
7171 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7172 {
7173         struct bnx2 *bp = netdev_priv(dev);
7174
7175         bp->rx_csum = data;
7176         return 0;
7177 }
7178
7179 static int
7180 bnx2_set_tso(struct net_device *dev, u32 data)
7181 {
7182         struct bnx2 *bp = netdev_priv(dev);
7183
7184         if (data) {
7185                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7186                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7187                         dev->features |= NETIF_F_TSO6;
7188         } else
7189                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7190                                    NETIF_F_TSO_ECN);
7191         return 0;
7192 }
7193
/* Names of the ethtool statistics.  Entry order must match
 * bnx2_stats_offset_arr and the per-chip counter-width arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7245
/* Number of ethtool statistics, derived from the string table.  Use the
 * kernel ARRAY_SIZE() helper instead of the hand-rolled sizeof division:
 * it is the standard idiom and adds a compile-time check that its
 * argument is a real array rather than a pointer.
 */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7248
/* Offset (in 32-bit words) of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each ethtool counter, indexed in lockstep with
 * bnx2_stats_str_arr.  64-bit counters reference their _hi word; the low
 * word is read from the following offset (see bnx2_get_ethtool_stats).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7300
/* Byte width of each counter on 5706 and 5708 A0 chips (0 = skipped).
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7311
/* Byte width of each counter on the remaining chips; only
 * stat_IfHCInBadOctets (second entry) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7319
/* Number of ethtool self-tests; must match bnx2_tests_str_arr and the
 * buf[] slots filled in by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Names of the ethtool self-tests, in result-slot order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7332
7333 static int
7334 bnx2_get_sset_count(struct net_device *dev, int sset)
7335 {
7336         switch (sset) {
7337         case ETH_SS_TEST:
7338                 return BNX2_NUM_TESTS;
7339         case ETH_SS_STATS:
7340                 return BNX2_NUM_STATS;
7341         default:
7342                 return -EOPNOTSUPP;
7343         }
7344 }
7345
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Run the ethtool self-tests.  buf[] slots map to
	 * bnx2_tests_str_arr: 0=register 1=memory 2=loopback (offline
	 * only), 3=nvram 4=interrupt 5=link.  Nonzero means failure.
	 */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access to the hardware, so
		 * stop traffic and put the chip in diagnostic mode.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or power down if the interface
		 * was closed while the tests ran).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Drop back to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7404
7405 static void
7406 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7407 {
7408         switch (stringset) {
7409         case ETH_SS_STATS:
7410                 memcpy(buf, bnx2_stats_str_arr,
7411                         sizeof(bnx2_stats_str_arr));
7412                 break;
7413         case ETH_SS_TEST:
7414                 memcpy(buf, bnx2_tests_str_arr,
7415                         sizeof(bnx2_tests_str_arr));
7416                 break;
7417         }
7418 }
7419
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	/* temp_stats_blk appears to hold counters saved before chip resets
	 * (see bnx2_save_stats() in bnx2_change_ring_size); each reported
	 * value is the sum of the live and saved counters.
	 */
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chips have errata that make some counters unusable; pick
	 * the matching table of per-counter widths (0 = skip).
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: offset addresses the high word, the low
		 * word follows immediately after it.
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7466
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	/* Blink the port LED to identify the adapter: 'data' seconds of
	 * alternating on/off phases at 500 ms each (default 2 seconds).
	 */
	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	/* Take manual control of the LED; the old mode is restored on exit. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		/* Stop blinking early if the user interrupts. */
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7506
7507 static int
7508 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7509 {
7510         struct bnx2 *bp = netdev_priv(dev);
7511
7512         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7513                 return ethtool_op_set_tx_ipv6_csum(dev, data);
7514         else
7515                 return ethtool_op_set_tx_csum(dev, data);
7516 }
7517
7518 static int
7519 bnx2_set_flags(struct net_device *dev, u32 data)
7520 {
7521         struct bnx2 *bp = netdev_priv(dev);
7522         int rc;
7523
7524         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) &&
7525             !(data & ETH_FLAG_RXVLAN))
7526                 return -EINVAL;
7527
7528         rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
7529                                   ETH_FLAG_TXVLAN);
7530         if (rc)
7531                 return rc;
7532
7533         if ((!!(data & ETH_FLAG_RXVLAN) !=
7534             !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7535             netif_running(dev)) {
7536                 bnx2_netif_stop(bp, false);
7537                 bnx2_set_rx_mode(dev);
7538                 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7539                 bnx2_netif_start(bp, false);
7540         }
7541
7542         return 0;
7543 }
7544
/* ethtool operations table; attached to the netdev in bnx2_init_one(). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.set_flags		= bnx2_set_flags,
	.get_flags		= ethtool_op_get_flags,
};
7577
7578 /* Called with rtnl_lock */
7579 static int
7580 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7581 {
7582         struct mii_ioctl_data *data = if_mii(ifr);
7583         struct bnx2 *bp = netdev_priv(dev);
7584         int err;
7585
7586         switch(cmd) {
7587         case SIOCGMIIPHY:
7588                 data->phy_id = bp->phy_addr;
7589
7590                 /* fallthru */
7591         case SIOCGMIIREG: {
7592                 u32 mii_regval;
7593
7594                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7595                         return -EOPNOTSUPP;
7596
7597                 if (!netif_running(dev))
7598                         return -EAGAIN;
7599
7600                 spin_lock_bh(&bp->phy_lock);
7601                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7602                 spin_unlock_bh(&bp->phy_lock);
7603
7604                 data->val_out = mii_regval;
7605
7606                 return err;
7607         }
7608
7609         case SIOCSMIIREG:
7610                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7611                         return -EOPNOTSUPP;
7612
7613                 if (!netif_running(dev))
7614                         return -EAGAIN;
7615
7616                 spin_lock_bh(&bp->phy_lock);
7617                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7618                 spin_unlock_bh(&bp->phy_lock);
7619
7620                 return err;
7621
7622         default:
7623                 /* do nothing */
7624                 break;
7625         }
7626         return -EOPNOTSUPP;
7627 }
7628
7629 /* Called with rtnl_lock */
7630 static int
7631 bnx2_change_mac_addr(struct net_device *dev, void *p)
7632 {
7633         struct sockaddr *addr = p;
7634         struct bnx2 *bp = netdev_priv(dev);
7635
7636         if (!is_valid_ether_addr(addr->sa_data))
7637                 return -EINVAL;
7638
7639         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7640         if (netif_running(dev))
7641                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7642
7643         return 0;
7644 }
7645
7646 /* Called with rtnl_lock */
7647 static int
7648 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7649 {
7650         struct bnx2 *bp = netdev_priv(dev);
7651
7652         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7653                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7654                 return -EINVAL;
7655
7656         dev->mtu = new_mtu;
7657         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7658 }
7659
7660 #ifdef CONFIG_NET_POLL_CONTROLLER
7661 static void
7662 poll_bnx2(struct net_device *dev)
7663 {
7664         struct bnx2 *bp = netdev_priv(dev);
7665         int i;
7666
7667         for (i = 0; i < bp->irq_nvecs; i++) {
7668                 struct bnx2_irq *irq = &bp->irq_tbl[i];
7669
7670                 disable_irq(irq->vector);
7671                 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7672                 enable_irq(irq->vector);
7673         }
7674 }
7675 #endif
7676
7677 static void __devinit
7678 bnx2_get_5709_media(struct bnx2 *bp)
7679 {
7680         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7681         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7682         u32 strap;
7683
7684         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7685                 return;
7686         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7687                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7688                 return;
7689         }
7690
7691         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7692                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7693         else
7694                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7695
7696         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7697                 switch (strap) {
7698                 case 0x4:
7699                 case 0x5:
7700                 case 0x6:
7701                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7702                         return;
7703                 }
7704         } else {
7705                 switch (strap) {
7706                 case 0x1:
7707                 case 0x2:
7708                 case 0x4:
7709                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7710                         return;
7711                 }
7712         }
7713 }
7714
7715 static void __devinit
7716 bnx2_get_pci_speed(struct bnx2 *bp)
7717 {
7718         u32 reg;
7719
7720         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7721         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7722                 u32 clkreg;
7723
7724                 bp->flags |= BNX2_FLAG_PCIX;
7725
7726                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7727
7728                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7729                 switch (clkreg) {
7730                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7731                         bp->bus_speed_mhz = 133;
7732                         break;
7733
7734                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7735                         bp->bus_speed_mhz = 100;
7736                         break;
7737
7738                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7739                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7740                         bp->bus_speed_mhz = 66;
7741                         break;
7742
7743                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7744                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7745                         bp->bus_speed_mhz = 50;
7746                         break;
7747
7748                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7749                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7750                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7751                         bp->bus_speed_mhz = 33;
7752                         break;
7753                 }
7754         }
7755         else {
7756                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7757                         bp->bus_speed_mhz = 66;
7758                 else
7759                         bp->bus_speed_mhz = 33;
7760         }
7761
7762         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7763                 bp->flags |= BNX2_FLAG_PCI_32BIT;
7764
7765 }
7766
/* Read the VPD (Vital Product Data) image out of NVRAM and, if it
 * carries an OEM firmware version string, append it to bp->fw_version.
 * Any parse failure is silently ignored - the VPD version is optional.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* lower half of the buffer receives the byte-swapped copy that
	 * is actually parsed; the upper half holds the raw NVRAM data
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores each 32-bit word byte-swapped; undo that while
	 * copying into the lower half of the buffer
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* locate the read-only VPD section */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	/* only proceed when the manufacturer id is "1028"
	 * (presumably Dell's PCI vendor id rendered as ASCII)
	 */
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* vendor-specific keyword V0 carries the version string */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* append to fw_version; a trailing blank separates later fields */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7834
7835 static int __devinit
7836 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7837 {
7838         struct bnx2 *bp;
7839         unsigned long mem_len;
7840         int rc, i, j;
7841         u32 reg;
7842         u64 dma_mask, persist_dma_mask;
7843         int err;
7844
7845         SET_NETDEV_DEV(dev, &pdev->dev);
7846         bp = netdev_priv(dev);
7847
7848         bp->flags = 0;
7849         bp->phy_flags = 0;
7850
7851         bp->temp_stats_blk =
7852                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7853
7854         if (bp->temp_stats_blk == NULL) {
7855                 rc = -ENOMEM;
7856                 goto err_out;
7857         }
7858
7859         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7860         rc = pci_enable_device(pdev);
7861         if (rc) {
7862                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7863                 goto err_out;
7864         }
7865
7866         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7867                 dev_err(&pdev->dev,
7868                         "Cannot find PCI device base address, aborting\n");
7869                 rc = -ENODEV;
7870                 goto err_out_disable;
7871         }
7872
7873         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7874         if (rc) {
7875                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7876                 goto err_out_disable;
7877         }
7878
7879         pci_set_master(pdev);
7880
7881         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7882         if (bp->pm_cap == 0) {
7883                 dev_err(&pdev->dev,
7884                         "Cannot find power management capability, aborting\n");
7885                 rc = -EIO;
7886                 goto err_out_release;
7887         }
7888
7889         bp->dev = dev;
7890         bp->pdev = pdev;
7891
7892         spin_lock_init(&bp->phy_lock);
7893         spin_lock_init(&bp->indirect_lock);
7894 #ifdef BCM_CNIC
7895         mutex_init(&bp->cnic_lock);
7896 #endif
7897         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7898
7899         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7900         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7901         dev->mem_end = dev->mem_start + mem_len;
7902         dev->irq = pdev->irq;
7903
7904         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7905
7906         if (!bp->regview) {
7907                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7908                 rc = -ENOMEM;
7909                 goto err_out_release;
7910         }
7911
7912         /* Configure byte swap and enable write to the reg_window registers.
7913          * Rely on CPU to do target byte swapping on big endian systems
7914          * The chip's target access swapping will not swap all accesses
7915          */
7916         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7917                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7918                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7919
7920         bnx2_set_power_state(bp, PCI_D0);
7921
7922         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7923
7924         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7925                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7926                         dev_err(&pdev->dev,
7927                                 "Cannot find PCIE capability, aborting\n");
7928                         rc = -EIO;
7929                         goto err_out_unmap;
7930                 }
7931                 bp->flags |= BNX2_FLAG_PCIE;
7932                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7933                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7934
7935                 /* AER (Advanced Error Reporting) hooks */
7936                 err = pci_enable_pcie_error_reporting(pdev);
7937                 if (err) {
7938                         dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
7939                                             "failed 0x%x\n", err);
7940                         /* non-fatal, continue */
7941                 }
7942
7943         } else {
7944                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7945                 if (bp->pcix_cap == 0) {
7946                         dev_err(&pdev->dev,
7947                                 "Cannot find PCIX capability, aborting\n");
7948                         rc = -EIO;
7949                         goto err_out_unmap;
7950                 }
7951                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7952         }
7953
7954         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7955                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7956                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7957         }
7958
7959         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7960                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7961                         bp->flags |= BNX2_FLAG_MSI_CAP;
7962         }
7963
7964         /* 5708 cannot support DMA addresses > 40-bit.  */
7965         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7966                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7967         else
7968                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7969
7970         /* Configure DMA attributes. */
7971         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7972                 dev->features |= NETIF_F_HIGHDMA;
7973                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7974                 if (rc) {
7975                         dev_err(&pdev->dev,
7976                                 "pci_set_consistent_dma_mask failed, aborting\n");
7977                         goto err_out_unmap;
7978                 }
7979         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7980                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7981                 goto err_out_unmap;
7982         }
7983
7984         if (!(bp->flags & BNX2_FLAG_PCIE))
7985                 bnx2_get_pci_speed(bp);
7986
7987         /* 5706A0 may falsely detect SERR and PERR. */
7988         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7989                 reg = REG_RD(bp, PCI_COMMAND);
7990                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7991                 REG_WR(bp, PCI_COMMAND, reg);
7992         }
7993         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7994                 !(bp->flags & BNX2_FLAG_PCIX)) {
7995
7996                 dev_err(&pdev->dev,
7997                         "5706 A1 can only be used in a PCIX bus, aborting\n");
7998                 goto err_out_unmap;
7999         }
8000
8001         bnx2_init_nvram(bp);
8002
8003         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8004
8005         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8006             BNX2_SHM_HDR_SIGNATURE_SIG) {
8007                 u32 off = PCI_FUNC(pdev->devfn) << 2;
8008
8009                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8010         } else
8011                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8012
8013         /* Get the permanent MAC address.  First we need to make sure the
8014          * firmware is actually running.
8015          */
8016         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8017
8018         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8019             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8020                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8021                 rc = -ENODEV;
8022                 goto err_out_unmap;
8023         }
8024
8025         bnx2_read_vpd_fw_ver(bp);
8026
8027         j = strlen(bp->fw_version);
8028         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8029         for (i = 0; i < 3 && j < 24; i++) {
8030                 u8 num, k, skip0;
8031
8032                 if (i == 0) {
8033                         bp->fw_version[j++] = 'b';
8034                         bp->fw_version[j++] = 'c';
8035                         bp->fw_version[j++] = ' ';
8036                 }
8037                 num = (u8) (reg >> (24 - (i * 8)));
8038                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8039                         if (num >= k || !skip0 || k == 1) {
8040                                 bp->fw_version[j++] = (num / k) + '0';
8041                                 skip0 = 0;
8042                         }
8043                 }
8044                 if (i != 2)
8045                         bp->fw_version[j++] = '.';
8046         }
8047         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8048         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8049                 bp->wol = 1;
8050
8051         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8052                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8053
8054                 for (i = 0; i < 30; i++) {
8055                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8056                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8057                                 break;
8058                         msleep(10);
8059                 }
8060         }
8061         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8062         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8063         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8064             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8065                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8066
8067                 if (j < 32)
8068                         bp->fw_version[j++] = ' ';
8069                 for (i = 0; i < 3 && j < 28; i++) {
8070                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8071                         reg = swab32(reg);
8072                         memcpy(&bp->fw_version[j], &reg, 4);
8073                         j += 4;
8074                 }
8075         }
8076
8077         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8078         bp->mac_addr[0] = (u8) (reg >> 8);
8079         bp->mac_addr[1] = (u8) reg;
8080
8081         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8082         bp->mac_addr[2] = (u8) (reg >> 24);
8083         bp->mac_addr[3] = (u8) (reg >> 16);
8084         bp->mac_addr[4] = (u8) (reg >> 8);
8085         bp->mac_addr[5] = (u8) reg;
8086
8087         bp->tx_ring_size = MAX_TX_DESC_CNT;
8088         bnx2_set_rx_ring_size(bp, 255);
8089
8090         bp->rx_csum = 1;
8091
8092         bp->tx_quick_cons_trip_int = 2;
8093         bp->tx_quick_cons_trip = 20;
8094         bp->tx_ticks_int = 18;
8095         bp->tx_ticks = 80;
8096
8097         bp->rx_quick_cons_trip_int = 2;
8098         bp->rx_quick_cons_trip = 12;
8099         bp->rx_ticks_int = 18;
8100         bp->rx_ticks = 18;
8101
8102         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8103
8104         bp->current_interval = BNX2_TIMER_INTERVAL;
8105
8106         bp->phy_addr = 1;
8107
8108         /* Disable WOL support if we are running on a SERDES chip. */
8109         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8110                 bnx2_get_5709_media(bp);
8111         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8112                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8113
8114         bp->phy_port = PORT_TP;
8115         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8116                 bp->phy_port = PORT_FIBRE;
8117                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8118                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8119                         bp->flags |= BNX2_FLAG_NO_WOL;
8120                         bp->wol = 0;
8121                 }
8122                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8123                         /* Don't do parallel detect on this board because of
8124                          * some board problems.  The link will not go down
8125                          * if we do parallel detect.
8126                          */
8127                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8128                             pdev->subsystem_device == 0x310c)
8129                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8130                 } else {
8131                         bp->phy_addr = 2;
8132                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8133                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8134                 }
8135         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8136                    CHIP_NUM(bp) == CHIP_NUM_5708)
8137                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8138         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8139                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8140                   CHIP_REV(bp) == CHIP_REV_Bx))
8141                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8142
8143         bnx2_init_fw_cap(bp);
8144
8145         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8146             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8147             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8148             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8149                 bp->flags |= BNX2_FLAG_NO_WOL;
8150                 bp->wol = 0;
8151         }
8152
8153         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8154                 bp->tx_quick_cons_trip_int =
8155                         bp->tx_quick_cons_trip;
8156                 bp->tx_ticks_int = bp->tx_ticks;
8157                 bp->rx_quick_cons_trip_int =
8158                         bp->rx_quick_cons_trip;
8159                 bp->rx_ticks_int = bp->rx_ticks;
8160                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8161                 bp->com_ticks_int = bp->com_ticks;
8162                 bp->cmd_ticks_int = bp->cmd_ticks;
8163         }
8164
8165         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8166          *
8167          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8168          * with byte enables disabled on the unused 32-bit word.  This is legal
8169          * but causes problems on the AMD 8132 which will eventually stop
8170          * responding after a while.
8171          *
8172          * AMD believes this incompatibility is unique to the 5706, and
8173          * prefers to locally disable MSI rather than globally disabling it.
8174          */
8175         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8176                 struct pci_dev *amd_8132 = NULL;
8177
8178                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8179                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8180                                                   amd_8132))) {
8181
8182                         if (amd_8132->revision >= 0x10 &&
8183                             amd_8132->revision <= 0x13) {
8184                                 disable_msi = 1;
8185                                 pci_dev_put(amd_8132);
8186                                 break;
8187                         }
8188                 }
8189         }
8190
8191         bnx2_set_default_link(bp);
8192         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8193
8194         init_timer(&bp->timer);
8195         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8196         bp->timer.data = (unsigned long) bp;
8197         bp->timer.function = bnx2_timer;
8198
8199         pci_save_state(pdev);
8200
8201         return 0;
8202
8203 err_out_unmap:
8204         if (bp->flags & BNX2_FLAG_PCIE)
8205                 pci_disable_pcie_error_reporting(pdev);
8206
8207         if (bp->regview) {
8208                 iounmap(bp->regview);
8209                 bp->regview = NULL;
8210         }
8211
8212 err_out_release:
8213         pci_release_regions(pdev);
8214
8215 err_out_disable:
8216         pci_disable_device(pdev);
8217         pci_set_drvdata(pdev, NULL);
8218
8219 err_out:
8220         return rc;
8221 }
8222
8223 static char * __devinit
8224 bnx2_bus_string(struct bnx2 *bp, char *str)
8225 {
8226         char *s = str;
8227
8228         if (bp->flags & BNX2_FLAG_PCIE) {
8229                 s += sprintf(s, "PCI Express");
8230         } else {
8231                 s += sprintf(s, "PCI");
8232                 if (bp->flags & BNX2_FLAG_PCIX)
8233                         s += sprintf(s, "-X");
8234                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8235                         s += sprintf(s, " 32-bit");
8236                 else
8237                         s += sprintf(s, " 64-bit");
8238                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8239         }
8240         return str;
8241 }
8242
8243 static void
8244 bnx2_del_napi(struct bnx2 *bp)
8245 {
8246         int i;
8247
8248         for (i = 0; i < bp->irq_nvecs; i++)
8249                 netif_napi_del(&bp->bnx2_napi[i].napi);
8250 }
8251
8252 static void
8253 bnx2_init_napi(struct bnx2 *bp)
8254 {
8255         int i;
8256
8257         for (i = 0; i < bp->irq_nvecs; i++) {
8258                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8259                 int (*poll)(struct napi_struct *, int);
8260
8261                 if (i == 0)
8262                         poll = bnx2_poll;
8263                 else
8264                         poll = bnx2_poll_msix;
8265
8266                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8267                 bnapi->bp = bp;
8268         }
8269 }
8270
/* net_device operations table; attached to the netdev in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8286
8287 static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
8288 {
8289         dev->vlan_features |= flags;
8290 }
8291
/* PCI probe entry point: allocate the netdev, run board init, load
 * firmware images, set up offload feature flags, and register the
 * device.  Returns 0 on success or a negative errno, undoing all
 * acquired resources on failure.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];   /* bus description from bnx2_bus_string() */

	/* print the driver banner once, on the first probed device */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/* request the MIPS and RV2P firmware images from userspace */
	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* permanent MAC address was read from shared memory by
	 * bnx2_init_board()
	 */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	/* base offloads; IPv6 csum and TSO6 are 5709-only */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
			 NETIF_F_RXHASH;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* unwind everything acquired here and by bnx2_init_board() */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8374
/* PCI removal handler: tear down one NIC, roughly reversing the order
 * of acquisition in bnx2_init_one()/bnx2_init_board().
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Wait for any queued deferred work before the device vanishes. */
        flush_scheduled_work();

        unregister_netdev(dev);

        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);

        kfree(bp->temp_stats_blk);

        /* AER reporting was presumably enabled during probe for PCIE
         * parts — see BNX2_FLAG_PCIE users in bnx2_init_board(). */
        if (bp->flags & BNX2_FLAG_PCIE)
                pci_disable_pcie_error_reporting(pdev);

        free_netdev(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
8404
/* Legacy PM suspend hook: quiesce the NIC and drop it to the requested
 * low-power state.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* PCI register 4 needs to be saved whether netif_running() or not.
         * MSI address and data need to be saved if using MSI and
         * netif_running().
         */
        pci_save_state(pdev);
        if (!netif_running(dev))
                return 0;

        /* Stop deferred work, traffic and the timer before resetting
         * the chip and freeing the rings. */
        flush_scheduled_work();
        bnx2_netif_stop(bp, true);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
8428
/* Legacy PM resume hook: restore PCI state, repower the chip and, if the
 * interface was running, reinitialize the NIC and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp, 1);
        bnx2_netif_start(bp, true);
        return 0;
}
8445
8446 /**
8447  * bnx2_io_error_detected - called when PCI error is detected
8448  * @pdev: Pointer to PCI device
8449  * @state: The current pci connection state
8450  *
8451  * This function is called after a PCI bus error affecting
8452  * this device has been detected.
8453  */
8454 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8455                                                pci_channel_state_t state)
8456 {
8457         struct net_device *dev = pci_get_drvdata(pdev);
8458         struct bnx2 *bp = netdev_priv(dev);
8459
8460         rtnl_lock();
8461         netif_device_detach(dev);
8462
8463         if (state == pci_channel_io_perm_failure) {
8464                 rtnl_unlock();
8465                 return PCI_ERS_RESULT_DISCONNECT;
8466         }
8467
8468         if (netif_running(dev)) {
8469                 bnx2_netif_stop(bp, true);
8470                 del_timer_sync(&bp->timer);
8471                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8472         }
8473
8474         pci_disable_device(pdev);
8475         rtnl_unlock();
8476
8477         /* Request a slot slot reset. */
8478         return PCI_ERS_RESULT_NEED_RESET;
8479 }
8480
8481 /**
8482  * bnx2_io_slot_reset - called after the pci bus has been reset.
8483  * @pdev: Pointer to PCI device
8484  *
8485  * Restart the card from scratch, as if from a cold-boot.
8486  */
8487 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8488 {
8489         struct net_device *dev = pci_get_drvdata(pdev);
8490         struct bnx2 *bp = netdev_priv(dev);
8491         pci_ers_result_t result;
8492         int err;
8493
8494         rtnl_lock();
8495         if (pci_enable_device(pdev)) {
8496                 dev_err(&pdev->dev,
8497                         "Cannot re-enable PCI device after reset\n");
8498                 result = PCI_ERS_RESULT_DISCONNECT;
8499         } else {
8500                 pci_set_master(pdev);
8501                 pci_restore_state(pdev);
8502                 pci_save_state(pdev);
8503
8504                 if (netif_running(dev)) {
8505                         bnx2_set_power_state(bp, PCI_D0);
8506                         bnx2_init_nic(bp, 1);
8507                 }
8508                 result = PCI_ERS_RESULT_RECOVERED;
8509         }
8510         rtnl_unlock();
8511
8512         if (!(bp->flags & BNX2_FLAG_PCIE))
8513                 return result;
8514
8515         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8516         if (err) {
8517                 dev_err(&pdev->dev,
8518                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8519                          err); /* non-fatal, continue */
8520         }
8521
8522         return result;
8523 }
8524
8525 /**
8526  * bnx2_io_resume - called when traffic can start flowing again.
8527  * @pdev: Pointer to PCI device
8528  *
8529  * This callback is called when the error recovery driver tells us that
8530  * its OK to resume normal operation.
8531  */
8532 static void bnx2_io_resume(struct pci_dev *pdev)
8533 {
8534         struct net_device *dev = pci_get_drvdata(pdev);
8535         struct bnx2 *bp = netdev_priv(dev);
8536
8537         rtnl_lock();
8538         if (netif_running(dev))
8539                 bnx2_netif_start(bp, true);
8540
8541         netif_device_attach(dev);
8542         rtnl_unlock();
8543 }
8544
/* PCI error recovery (AER) callbacks wired into bnx2_pci_driver. */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
8550
/* Driver registration table: probe/remove, legacy suspend/resume and
 * AER error handlers for all supported NX2 PCI IDs. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
8560
8561 static int __init bnx2_init(void)
8562 {
8563         return pci_register_driver(&bnx2_pci_driver);
8564 }
8565
8566 static void __exit bnx2_cleanup(void)
8567 {
8568         pci_unregister_driver(&bnx2_pci_driver);
8569 }
8570
/* Hook the entry/exit functions into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8573
8574
8575