bnx2.c — Broadcom NetXtreme II network driver
[pandora-kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/checksum.h>
43 #include <linux/workqueue.h>
44 #include <linux/crc32.h>
45 #include <linux/prefetch.h>
46 #include <linux/cache.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50
51 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
52 #define BCM_CNIC 1
53 #include "cnic_if.h"
54 #endif
55 #include "bnx2.h"
56 #include "bnx2_fw.h"
57
58 #define DRV_MODULE_NAME         "bnx2"
59 #define DRV_MODULE_VERSION      "2.1.6"
60 #define DRV_MODULE_RELDATE      "Mar 7, 2011"
61 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-6.2.1.fw"
62 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-6.0.15.fw"
63 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-6.2.1a.fw"
64 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-6.0.17.fw"
66
67 #define RUN_AT(x) (jiffies + (x))
68
69 /* Time in jiffies before concluding the transmitter is hung. */
70 #define TX_TIMEOUT  (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
74
75 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
76 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(DRV_MODULE_VERSION);
79 MODULE_FIRMWARE(FW_MIPS_FILE_06);
80 MODULE_FIRMWARE(FW_RV2P_FILE_06);
81 MODULE_FIRMWARE(FW_MIPS_FILE_09);
82 MODULE_FIRMWARE(FW_RV2P_FILE_09);
83 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
84
85 static int disable_msi = 0;
86
87 module_param(disable_msi, int, 0);
88 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
89
90 typedef enum {
91         BCM5706 = 0,
92         NC370T,
93         NC370I,
94         BCM5706S,
95         NC370F,
96         BCM5708,
97         BCM5708S,
98         BCM5709,
99         BCM5709S,
100         BCM5716,
101         BCM5716S,
102 } board_t;
103
104 /* indexed by board_t, above */
105 static struct {
106         char *name;
107 } board_info[] __devinitdata = {
108         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
109         { "HP NC370T Multifunction Gigabit Server Adapter" },
110         { "HP NC370i Multifunction Gigabit Server Adapter" },
111         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
112         { "HP NC370F Multifunction Gigabit Server Adapter" },
113         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
114         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
115         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
116         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
117         { "Broadcom NetXtreme II BCM5716 1000Base-T" },
118         { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
119         };
120
121 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
123           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
129           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
131           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
132         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
134         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
135           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
136         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
137           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
138         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
139           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
140         { PCI_VENDOR_ID_BROADCOM, 0x163b,
141           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
142         { PCI_VENDOR_ID_BROADCOM, 0x163c,
143           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
144         { 0, }
145 };
146
147 static const struct flash_spec flash_table[] =
148 {
149 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
150 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
151         /* Slow EEPROM */
152         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
153          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
154          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
155          "EEPROM - slow"},
156         /* Expansion entry 0001 */
157         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
158          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
159          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160          "Entry 0001"},
161         /* Saifun SA25F010 (non-buffered flash) */
162         /* strap, cfg1, & write1 need updates */
163         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
164          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
166          "Non-buffered flash (128kB)"},
167         /* Saifun SA25F020 (non-buffered flash) */
168         /* strap, cfg1, & write1 need updates */
169         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
170          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
171          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
172          "Non-buffered flash (256kB)"},
173         /* Expansion entry 0100 */
174         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
175          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177          "Entry 0100"},
178         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
179         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
180          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
181          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
182          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
183         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
184         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
185          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
186          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
187          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
188         /* Saifun SA25F005 (non-buffered flash) */
189         /* strap, cfg1, & write1 need updates */
190         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
191          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
192          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
193          "Non-buffered flash (64kB)"},
194         /* Fast EEPROM */
195         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
196          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
197          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
198          "EEPROM - fast"},
199         /* Expansion entry 1001 */
200         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
201          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
202          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
203          "Entry 1001"},
204         /* Expansion entry 1010 */
205         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
206          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
207          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
208          "Entry 1010"},
209         /* ATMEL AT45DB011B (buffered flash) */
210         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
211          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
212          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
213          "Buffered flash (128kB)"},
214         /* Expansion entry 1100 */
215         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
216          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
217          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
218          "Entry 1100"},
219         /* Expansion entry 1101 */
220         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
221          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
222          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
223          "Entry 1101"},
224         /* Ateml Expansion entry 1110 */
225         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
226          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
227          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
228          "Entry 1110 (Atmel)"},
229         /* ATMEL AT45DB021B (buffered flash) */
230         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
231          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
232          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
233          "Buffered flash (256kB)"},
234 };
235
236 static const struct flash_spec flash_5709 = {
237         .flags          = BNX2_NV_BUFFERED,
238         .page_bits      = BCM5709_FLASH_PAGE_BITS,
239         .page_size      = BCM5709_FLASH_PAGE_SIZE,
240         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
241         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
242         .name           = "5709 Buffered flash (256kB)",
243 };
244
245 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
246
247 static void bnx2_init_napi(struct bnx2 *bp);
248 static void bnx2_del_napi(struct bnx2 *bp);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
255         barrier();
256
257         /* The ring uses 256 indices for 255 entries, one of them
258          * needs to be skipped.
259          */
260         diff = txr->tx_prod - txr->tx_cons;
261         if (unlikely(diff >= TX_DESC_CNT)) {
262                 diff &= 0xffff;
263                 if (diff == TX_DESC_CNT)
264                         diff = MAX_TX_DESC_CNT;
265         }
266         return bp->tx_ring_size - diff;
267 }
268
/* Read a device register indirectly through the PCICFG register window.
 * indirect_lock serializes the window-address write against the data
 * read so concurrent indirect accesses cannot interleave.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
280
/* Write @val to a device register indirectly through the PCICFG
 * register window, under indirect_lock (see bnx2_reg_rd_ind()).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
289
/* Write @val into device shared memory at @offset (relative to
 * shmem_base) via an indirect register write.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
295
/* Read a word from device shared memory at @offset (relative to
 * shmem_base) via an indirect register read.
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
301
/* Write @val into on-chip context memory for context @cid_addr at
 * @offset.  On 5709 the write goes through the CTX_CTX_DATA /
 * CTX_CTX_CTRL request interface and we poll (up to 5 tries, 5us
 * apart) for the WRITE_REQ bit to clear; earlier chips take the
 * address and data directly.  indirect_lock serializes against other
 * indirect accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to consume the write request. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
325
326 #ifdef BCM_CNIC
327 static int
328 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329 {
330         struct bnx2 *bp = netdev_priv(dev);
331         struct drv_ctl_io *io = &info->data.io;
332
333         switch (info->cmd) {
334         case DRV_CTL_IO_WR_CMD:
335                 bnx2_reg_wr_ind(bp, io->offset, io->data);
336                 break;
337         case DRV_CTL_IO_RD_CMD:
338                 io->data = bnx2_reg_rd_ind(bp, io->offset);
339                 break;
340         case DRV_CTL_CTX_WR_CMD:
341                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342                 break;
343         default:
344                 return -EINVAL;
345         }
346         return 0;
347 }
348
/* Fill in the cnic_eth_dev IRQ descriptor for the cnic driver.  With
 * MSI-X the cnic driver gets the vector at index bp->irq_nvecs (the
 * one after those used by this driver) and the matching MSI-X status
 * block; otherwise it shares vector 0 and the base status block, and
 * cnic_present/cnic_tag arm shared-IRQ handling in the NAPI path.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out back to back at MSIX alignment. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
375
/* Register the cnic driver's @ops with this device.
 * Returns -EINVAL if @ops is NULL, -EBUSY if a cnic driver is already
 * registered.  On success the ops pointer is published with RCU and
 * the cnic IRQ resources are set up.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
398
/* Detach the cnic driver: clear its state under cnic_lock, unpublish
 * the ops pointer, then wait for in-flight RCU readers to drain before
 * returning so the cnic module may safely unload.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
413
/* Probe entry point called by the cnic driver to attach to this
 * device.  Returns the per-device cnic_eth_dev descriptor filled in
 * with the register/unregister/control hooks, or NULL if the device
 * has no iSCSI connections to offer.
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
433
/* Tell a registered cnic driver, if any, to stop.  cnic_lock both
 * protects the cnic_ops pointer and serializes the control call.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
449
/* Tell a registered cnic driver, if any, to (re)start.  When the IRQ
 * is shared (no MSI-X), resync cnic_tag with the current status block
 * index first so the shared-IRQ path starts from a clean state.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
470
471 #else
472
/* No-op stub used when CONFIG_CNIC is not enabled (!BCM_CNIC). */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
477
/* No-op stub used when CONFIG_CNIC is not enabled (!BCM_CNIC). */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
482
483 #endif
484
/* Read PHY register @reg over MDIO into *@val.
 * If the MAC is auto-polling the PHY, auto-poll is switched off first
 * (and restored on the way out) so the manual MDIO command cannot
 * collide with it.  Completion is polled up to 50 times at 10us
 * intervals; on timeout *val is zeroed and -EBUSY is returned.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Build the MDIO read command: PHY address at bit 21, register
	 * number at bit 16, START_BUSY kicks off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
541
/* Write @val to PHY register @reg over MDIO.
 * Mirror image of bnx2_read_phy(): auto-poll is temporarily disabled
 * around the manual command, completion is polled up to 50 times at
 * 10us intervals, and -EBUSY is returned on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Command word: PHY address at bit 21, register at bit 16, data
	 * in the low bits; START_BUSY kicks off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
590
591 static void
592 bnx2_disable_int(struct bnx2 *bp)
593 {
594         int i;
595         struct bnx2_napi *bnapi;
596
597         for (i = 0; i < bp->irq_nvecs; i++) {
598                 bnapi = &bp->bnx2_napi[i];
599                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
600                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
601         }
602         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
603 }
604
/* Unmask interrupts on all vectors.  Per vector, the first write acks
 * up to last_status_idx while still masked, the second write unmasks.
 * Finally COAL_NOW tells the host coalescing block to generate an
 * interrupt immediately if events are pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
625
/* Mask all device interrupts and wait for running handlers to finish.
 * intr_sem is raised first so the ISR treats the device as disabled;
 * it stays raised until bnx2_netif_start() drops it back to zero.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
639
640 static void
641 bnx2_napi_disable(struct bnx2 *bp)
642 {
643         int i;
644
645         for (i = 0; i < bp->irq_nvecs; i++)
646                 napi_disable(&bp->bnx2_napi[i].napi);
647 }
648
649 static void
650 bnx2_napi_enable(struct bnx2 *bp)
651 {
652         int i;
653
654         for (i = 0; i < bp->irq_nvecs; i++)
655                 napi_enable(&bp->bnx2_napi[i].napi);
656 }
657
/* Quiesce the interface: optionally stop the cnic driver, halt NAPI
 * and the TX queues, mask and synchronize interrupts, then drop the
 * carrier so the stack does not fire a TX timeout while we are down.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
670
/* Undo bnx2_netif_stop().  Stops may nest via intr_sem; only the call
 * that brings intr_sem back to zero restarts the interface: wake the
 * TX queues, restore carrier under phy_lock if the link is up,
 * re-enable NAPI and interrupts, and optionally restart the cnic
 * driver.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
688
689 static void
690 bnx2_free_tx_mem(struct bnx2 *bp)
691 {
692         int i;
693
694         for (i = 0; i < bp->num_tx_rings; i++) {
695                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
696                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
697
698                 if (txr->tx_desc_ring) {
699                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
700                                           txr->tx_desc_ring,
701                                           txr->tx_desc_mapping);
702                         txr->tx_desc_ring = NULL;
703                 }
704                 kfree(txr->tx_buf_ring);
705                 txr->tx_buf_ring = NULL;
706         }
707 }
708
709 static void
710 bnx2_free_rx_mem(struct bnx2 *bp)
711 {
712         int i;
713
714         for (i = 0; i < bp->num_rx_rings; i++) {
715                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
716                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
717                 int j;
718
719                 for (j = 0; j < bp->rx_max_ring; j++) {
720                         if (rxr->rx_desc_ring[j])
721                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
722                                                   rxr->rx_desc_ring[j],
723                                                   rxr->rx_desc_mapping[j]);
724                         rxr->rx_desc_ring[j] = NULL;
725                 }
726                 vfree(rxr->rx_buf_ring);
727                 rxr->rx_buf_ring = NULL;
728
729                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
730                         if (rxr->rx_pg_desc_ring[j])
731                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
732                                                   rxr->rx_pg_desc_ring[j],
733                                                   rxr->rx_pg_desc_mapping[j]);
734                         rxr->rx_pg_desc_ring[j] = NULL;
735                 }
736                 vfree(rxr->rx_pg_ring);
737                 rxr->rx_pg_ring = NULL;
738         }
739 }
740
741 static int
742 bnx2_alloc_tx_mem(struct bnx2 *bp)
743 {
744         int i;
745
746         for (i = 0; i < bp->num_tx_rings; i++) {
747                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
748                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
749
750                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
751                 if (txr->tx_buf_ring == NULL)
752                         return -ENOMEM;
753
754                 txr->tx_desc_ring =
755                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
756                                            &txr->tx_desc_mapping, GFP_KERNEL);
757                 if (txr->tx_desc_ring == NULL)
758                         return -ENOMEM;
759         }
760         return 0;
761 }
762
763 static int
764 bnx2_alloc_rx_mem(struct bnx2 *bp)
765 {
766         int i;
767
768         for (i = 0; i < bp->num_rx_rings; i++) {
769                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
770                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
771                 int j;
772
773                 rxr->rx_buf_ring =
774                         vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
775                 if (rxr->rx_buf_ring == NULL)
776                         return -ENOMEM;
777
778                 for (j = 0; j < bp->rx_max_ring; j++) {
779                         rxr->rx_desc_ring[j] =
780                                 dma_alloc_coherent(&bp->pdev->dev,
781                                                    RXBD_RING_SIZE,
782                                                    &rxr->rx_desc_mapping[j],
783                                                    GFP_KERNEL);
784                         if (rxr->rx_desc_ring[j] == NULL)
785                                 return -ENOMEM;
786
787                 }
788
789                 if (bp->rx_pg_ring_size) {
790                         rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
791                                                   bp->rx_max_pg_ring);
792                         if (rxr->rx_pg_ring == NULL)
793                                 return -ENOMEM;
794
795                 }
796
797                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
798                         rxr->rx_pg_desc_ring[j] =
799                                 dma_alloc_coherent(&bp->pdev->dev,
800                                                    RXBD_RING_SIZE,
801                                                    &rxr->rx_pg_desc_mapping[j],
802                                                    GFP_KERNEL);
803                         if (rxr->rx_pg_desc_ring[j] == NULL)
804                                 return -ENOMEM;
805
806                 }
807         }
808         return 0;
809 }
810
/* Free all DMA and host memory owned by the device: the TX/RX rings,
 * the 5709 context block pages, and the combined status + statistics
 * block.  Safe on a partially allocated device — every pointer is
 * checked and NULLed — so it doubles as the error-unwind path for
 * bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* The status and statistics blocks share one allocation. */
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
836
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	/* Allocate all host DMA memory the device uses: the combined
	 * status/statistics block, the 5709 context pages, and the rx/tx
	 * ring memory.  On any failure, everything allocated so far is
	 * released through bnx2_free_mem() and -ENOMEM is returned.
	 */
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* With MSI-X, reserve one aligned slot per HW vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 always uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each additional vector gets its own aligned
			 * slice of the status block area.
			 */
			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s) in the same
	 * coherent allocation; derive both CPU and DMA addresses.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 keeps 8KB of context memory in host pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* bnx2_free_mem() tolerates partially-completed allocation. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
914
915 static void
916 bnx2_report_fw_link(struct bnx2 *bp)
917 {
918         u32 fw_link_status = 0;
919
920         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
921                 return;
922
923         if (bp->link_up) {
924                 u32 bmsr;
925
926                 switch (bp->line_speed) {
927                 case SPEED_10:
928                         if (bp->duplex == DUPLEX_HALF)
929                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
930                         else
931                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
932                         break;
933                 case SPEED_100:
934                         if (bp->duplex == DUPLEX_HALF)
935                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
936                         else
937                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
938                         break;
939                 case SPEED_1000:
940                         if (bp->duplex == DUPLEX_HALF)
941                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
942                         else
943                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
944                         break;
945                 case SPEED_2500:
946                         if (bp->duplex == DUPLEX_HALF)
947                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
948                         else
949                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
950                         break;
951                 }
952
953                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
954
955                 if (bp->autoneg) {
956                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
957
958                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
959                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
960
961                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
962                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
963                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
964                         else
965                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
966                 }
967         }
968         else
969                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
970
971         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
972 }
973
974 static char *
975 bnx2_xceiver_str(struct bnx2 *bp)
976 {
977         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
978                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
979                  "Copper");
980 }
981
982 static void
983 bnx2_report_link(struct bnx2 *bp)
984 {
985         if (bp->link_up) {
986                 netif_carrier_on(bp->dev);
987                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
988                             bnx2_xceiver_str(bp),
989                             bp->line_speed,
990                             bp->duplex == DUPLEX_FULL ? "full" : "half");
991
992                 if (bp->flow_ctrl) {
993                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
994                                 pr_cont(", receive ");
995                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
996                                         pr_cont("& transmit ");
997                         }
998                         else {
999                                 pr_cont(", transmit ");
1000                         }
1001                         pr_cont("flow control ON");
1002                 }
1003                 pr_cont("\n");
1004         } else {
1005                 netif_carrier_off(bp->dev);
1006                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1007                            bnx2_xceiver_str(bp));
1008         }
1009
1010         bnx2_report_fw_link(bp);
1011 }
1012
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	/* Derive the operative pause setting (bp->flow_ctrl) either
	 * from the user-requested mode or from the autoneg result.
	 */
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Pause was not negotiated: honor the requested mode,
		 * but only on a full duplex link.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* The 5708 SerDes reports the resolved pause state
		 * directly in its 1000X status register.
		 */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Translate the 1000BASE-X pause bits into the copper
		 * encodings so one resolution table serves both below.
		 */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1088
1089 static int
1090 bnx2_5709s_linkup(struct bnx2 *bp)
1091 {
1092         u32 val, speed;
1093
1094         bp->link_up = 1;
1095
1096         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1097         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1098         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1099
1100         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1101                 bp->line_speed = bp->req_line_speed;
1102                 bp->duplex = bp->req_duplex;
1103                 return 0;
1104         }
1105         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1106         switch (speed) {
1107                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1108                         bp->line_speed = SPEED_10;
1109                         break;
1110                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1111                         bp->line_speed = SPEED_100;
1112                         break;
1113                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1114                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1115                         bp->line_speed = SPEED_1000;
1116                         break;
1117                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1118                         bp->line_speed = SPEED_2500;
1119                         break;
1120         }
1121         if (val & MII_BNX2_GP_TOP_AN_FD)
1122                 bp->duplex = DUPLEX_FULL;
1123         else
1124                 bp->duplex = DUPLEX_HALF;
1125         return 0;
1126 }
1127
1128 static int
1129 bnx2_5708s_linkup(struct bnx2 *bp)
1130 {
1131         u32 val;
1132
1133         bp->link_up = 1;
1134         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1135         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1136                 case BCM5708S_1000X_STAT1_SPEED_10:
1137                         bp->line_speed = SPEED_10;
1138                         break;
1139                 case BCM5708S_1000X_STAT1_SPEED_100:
1140                         bp->line_speed = SPEED_100;
1141                         break;
1142                 case BCM5708S_1000X_STAT1_SPEED_1G:
1143                         bp->line_speed = SPEED_1000;
1144                         break;
1145                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1146                         bp->line_speed = SPEED_2500;
1147                         break;
1148         }
1149         if (val & BCM5708S_1000X_STAT1_FD)
1150                 bp->duplex = DUPLEX_FULL;
1151         else
1152                 bp->duplex = DUPLEX_HALF;
1153
1154         return 0;
1155 }
1156
1157 static int
1158 bnx2_5706s_linkup(struct bnx2 *bp)
1159 {
1160         u32 bmcr, local_adv, remote_adv, common;
1161
1162         bp->link_up = 1;
1163         bp->line_speed = SPEED_1000;
1164
1165         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1166         if (bmcr & BMCR_FULLDPLX) {
1167                 bp->duplex = DUPLEX_FULL;
1168         }
1169         else {
1170                 bp->duplex = DUPLEX_HALF;
1171         }
1172
1173         if (!(bmcr & BMCR_ANENABLE)) {
1174                 return 0;
1175         }
1176
1177         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1178         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1179
1180         common = local_adv & remote_adv;
1181         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1182
1183                 if (common & ADVERTISE_1000XFULL) {
1184                         bp->duplex = DUPLEX_FULL;
1185                 }
1186                 else {
1187                         bp->duplex = DUPLEX_HALF;
1188                 }
1189         }
1190
1191         return 0;
1192 }
1193
1194 static int
1195 bnx2_copper_linkup(struct bnx2 *bp)
1196 {
1197         u32 bmcr;
1198
1199         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1200         if (bmcr & BMCR_ANENABLE) {
1201                 u32 local_adv, remote_adv, common;
1202
1203                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1204                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1205
1206                 common = local_adv & (remote_adv >> 2);
1207                 if (common & ADVERTISE_1000FULL) {
1208                         bp->line_speed = SPEED_1000;
1209                         bp->duplex = DUPLEX_FULL;
1210                 }
1211                 else if (common & ADVERTISE_1000HALF) {
1212                         bp->line_speed = SPEED_1000;
1213                         bp->duplex = DUPLEX_HALF;
1214                 }
1215                 else {
1216                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1217                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1218
1219                         common = local_adv & remote_adv;
1220                         if (common & ADVERTISE_100FULL) {
1221                                 bp->line_speed = SPEED_100;
1222                                 bp->duplex = DUPLEX_FULL;
1223                         }
1224                         else if (common & ADVERTISE_100HALF) {
1225                                 bp->line_speed = SPEED_100;
1226                                 bp->duplex = DUPLEX_HALF;
1227                         }
1228                         else if (common & ADVERTISE_10FULL) {
1229                                 bp->line_speed = SPEED_10;
1230                                 bp->duplex = DUPLEX_FULL;
1231                         }
1232                         else if (common & ADVERTISE_10HALF) {
1233                                 bp->line_speed = SPEED_10;
1234                                 bp->duplex = DUPLEX_HALF;
1235                         }
1236                         else {
1237                                 bp->line_speed = 0;
1238                                 bp->link_up = 0;
1239                         }
1240                 }
1241         }
1242         else {
1243                 if (bmcr & BMCR_SPEED100) {
1244                         bp->line_speed = SPEED_100;
1245                 }
1246                 else {
1247                         bp->line_speed = SPEED_10;
1248                 }
1249                 if (bmcr & BMCR_FULLDPLX) {
1250                         bp->duplex = DUPLEX_FULL;
1251                 }
1252                 else {
1253                         bp->duplex = DUPLEX_HALF;
1254                 }
1255         }
1256
1257         return 0;
1258 }
1259
1260 static void
1261 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1262 {
1263         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1264
1265         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1266         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1267         val |= 0x02 << 8;
1268
1269         if (bp->flow_ctrl & FLOW_CTRL_TX)
1270                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1271
1272         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1273 }
1274
1275 static void
1276 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1277 {
1278         int i;
1279         u32 cid;
1280
1281         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1282                 if (i == 1)
1283                         cid = RX_RSS_CID;
1284                 bnx2_init_rx_context(bp, cid);
1285         }
1286 }
1287
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	/* Program the EMAC to match the resolved link state: TX slot
	 * time, port mode, duplex, and rx/tx pause enables.
	 */
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		/* 1G half duplex uses an extended slot time. */
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G sets 25G_MODE and falls through
				 * to also select the GMII port mode.
				 */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Mirror the new flow control state into the rx contexts. */
	bnx2_init_all_rx_contexts(bp);
}
1354
1355 static void
1356 bnx2_enable_bmsr1(struct bnx2 *bp)
1357 {
1358         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1359             (CHIP_NUM(bp) == CHIP_NUM_5709))
1360                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1361                                MII_BNX2_BLK_ADDR_GP_STATUS);
1362 }
1363
1364 static void
1365 bnx2_disable_bmsr1(struct bnx2 *bp)
1366 {
1367         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1368             (CHIP_NUM(bp) == CHIP_NUM_5709))
1369                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1370                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1371 }
1372
1373 static int
1374 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1375 {
1376         u32 up1;
1377         int ret = 1;
1378
1379         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1380                 return 0;
1381
1382         if (bp->autoneg & AUTONEG_SPEED)
1383                 bp->advertising |= ADVERTISED_2500baseX_Full;
1384
1385         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1386                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1387
1388         bnx2_read_phy(bp, bp->mii_up1, &up1);
1389         if (!(up1 & BCM5708S_UP1_2G5)) {
1390                 up1 |= BCM5708S_UP1_2G5;
1391                 bnx2_write_phy(bp, bp->mii_up1, up1);
1392                 ret = 0;
1393         }
1394
1395         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1396                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1397                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1398
1399         return ret;
1400 }
1401
1402 static int
1403 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1404 {
1405         u32 up1;
1406         int ret = 0;
1407
1408         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1409                 return 0;
1410
1411         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1412                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1413
1414         bnx2_read_phy(bp, bp->mii_up1, &up1);
1415         if (up1 & BCM5708S_UP1_2G5) {
1416                 up1 &= ~BCM5708S_UP1_2G5;
1417                 bnx2_write_phy(bp, bp->mii_up1, up1);
1418                 ret = 1;
1419         }
1420
1421         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1422                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1423                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1424
1425         return ret;
1426 }
1427
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* Force the SerDes link to 2.5G.  The 5709 programs the
	 * SERDES_DIG MISC1 register; the 5708 uses a BMCR bit.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips cannot force 2.5G. */
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Forcing a speed implies autoneg off. */
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1471
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* Undo bnx2_enable_forced_2g5(): clear the chip-specific force
	 * bit and, under autoneg, restart negotiation at 1G.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips never force 2.5G; nothing to undo. */
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1510
1511 static void
1512 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1513 {
1514         u32 val;
1515
1516         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1517         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1518         if (start)
1519                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1520         else
1521                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1522 }
1523
static int
bnx2_set_link(struct bnx2 *bp)
{
	/* Re-evaluate the PHY link state, update bp->link_up and the
	 * resolved speed/duplex/pause, report any change, and program
	 * the MAC accordingly.
	 */
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		/* Loopback modes always report link up. */
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR is latched; read it twice to get the current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is also latched; read twice as well. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, override BMSR_LSTATUS from the EMAC
		 * link indication plus the AN sync status.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			/* Link lost in parallel-detect mode: re-enable
			 * autoneg and clear the flag.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1607
1608 static int
1609 bnx2_reset_phy(struct bnx2 *bp)
1610 {
1611         int i;
1612         u32 reg;
1613
1614         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1615
1616 #define PHY_RESET_MAX_WAIT 100
1617         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1618                 udelay(10);
1619
1620                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1621                 if (!(reg & BMCR_RESET)) {
1622                         udelay(20);
1623                         break;
1624                 }
1625         }
1626         if (i == PHY_RESET_MAX_WAIT) {
1627                 return -EBUSY;
1628         }
1629         return 0;
1630 }
1631
1632 static u32
1633 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1634 {
1635         u32 adv = 0;
1636
1637         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1638                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1639
1640                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1641                         adv = ADVERTISE_1000XPAUSE;
1642                 }
1643                 else {
1644                         adv = ADVERTISE_PAUSE_CAP;
1645                 }
1646         }
1647         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1648                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1649                         adv = ADVERTISE_1000XPSE_ASYM;
1650                 }
1651                 else {
1652                         adv = ADVERTISE_PAUSE_ASYM;
1653                 }
1654         }
1655         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1656                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1657                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1658                 }
1659                 else {
1660                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1661                 }
1662         }
1663         return adv;
1664 }
1665
1666 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1667
1668 static int
1669 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1670 __releases(&bp->phy_lock)
1671 __acquires(&bp->phy_lock)
1672 {
1673         u32 speed_arg = 0, pause_adv;
1674
1675         pause_adv = bnx2_phy_get_pause_adv(bp);
1676
1677         if (bp->autoneg & AUTONEG_SPEED) {
1678                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1679                 if (bp->advertising & ADVERTISED_10baseT_Half)
1680                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1681                 if (bp->advertising & ADVERTISED_10baseT_Full)
1682                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1683                 if (bp->advertising & ADVERTISED_100baseT_Half)
1684                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1685                 if (bp->advertising & ADVERTISED_100baseT_Full)
1686                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1687                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1688                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1689                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1690                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1691         } else {
1692                 if (bp->req_line_speed == SPEED_2500)
1693                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1694                 else if (bp->req_line_speed == SPEED_1000)
1695                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1696                 else if (bp->req_line_speed == SPEED_100) {
1697                         if (bp->req_duplex == DUPLEX_FULL)
1698                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1699                         else
1700                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1701                 } else if (bp->req_line_speed == SPEED_10) {
1702                         if (bp->req_duplex == DUPLEX_FULL)
1703                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1704                         else
1705                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1706                 }
1707         }
1708
1709         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1710                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1711         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1712                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1713
1714         if (port == PORT_TP)
1715                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1716                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1717
1718         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1719
1720         spin_unlock_bh(&bp->phy_lock);
1721         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1722         spin_lock_bh(&bp->phy_lock);
1723
1724         return 0;
1725 }
1726
/* Configure the SerDes PHY according to bp->autoneg and the requested
 * speed/duplex.  Delegates to bnx2_setup_remote_phy() when the PHY is
 * firmware-managed.  Called with bp->phy_lock held; the lock is dropped
 * around the forced link-down delay.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on/off requires a link
		 * bounce so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of forced 2.5G mode. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees link drop.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1843
/* All fibre link modes to advertise; includes 2.5G only when the PHY is
 * 2.5G-capable.  NOTE: expands using a local variable named 'bp'.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper link modes to advertise (10/100 both duplexes, 1000 full). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for every 10/100 mode plus the
 * IEEE 802.3 CSMA selector field.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both gigabit duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1858
1859 static void
1860 bnx2_set_default_remote_link(struct bnx2 *bp)
1861 {
1862         u32 link;
1863
1864         if (bp->phy_port == PORT_TP)
1865                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1866         else
1867                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1868
1869         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1870                 bp->req_line_speed = 0;
1871                 bp->autoneg |= AUTONEG_SPEED;
1872                 bp->advertising = ADVERTISED_Autoneg;
1873                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1874                         bp->advertising |= ADVERTISED_10baseT_Half;
1875                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1876                         bp->advertising |= ADVERTISED_10baseT_Full;
1877                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1878                         bp->advertising |= ADVERTISED_100baseT_Half;
1879                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1880                         bp->advertising |= ADVERTISED_100baseT_Full;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1882                         bp->advertising |= ADVERTISED_1000baseT_Full;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1884                         bp->advertising |= ADVERTISED_2500baseX_Full;
1885         } else {
1886                 bp->autoneg = 0;
1887                 bp->advertising = 0;
1888                 bp->req_duplex = DUPLEX_FULL;
1889                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1890                         bp->req_line_speed = SPEED_10;
1891                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1892                                 bp->req_duplex = DUPLEX_HALF;
1893                 }
1894                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1895                         bp->req_line_speed = SPEED_100;
1896                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1897                                 bp->req_duplex = DUPLEX_HALF;
1898                 }
1899                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1900                         bp->req_line_speed = SPEED_1000;
1901                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1902                         bp->req_line_speed = SPEED_2500;
1903         }
1904 }
1905
1906 static void
1907 bnx2_set_default_link(struct bnx2 *bp)
1908 {
1909         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1910                 bnx2_set_default_remote_link(bp);
1911                 return;
1912         }
1913
1914         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1915         bp->req_line_speed = 0;
1916         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1917                 u32 reg;
1918
1919                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1920
1921                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1922                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1923                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1924                         bp->autoneg = 0;
1925                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1926                         bp->req_duplex = DUPLEX_FULL;
1927                 }
1928         } else
1929                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1930 }
1931
/* Write the next driver pulse sequence number to the BNX2_DRV_PULSE_MB
 * shared-memory mailbox so the firmware sees the driver is alive.  The
 * write goes through the PCI register window, which is why
 * bp->indirect_lock must be held around the two REG_WR accesses.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1945
/* Handle a link event from the firmware-managed (remote) PHY.  Decodes
 * the BNX2_LINK_STATUS shared-memory word into bp->link_up, line_speed,
 * duplex, flow_ctrl and phy_port, reports a link change if one occurred,
 * and reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, for change detection */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware expects a heartbeat when it flags pulse expiry. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The HALF cases intentionally fall through to the FULL
		 * cases: they set the duplex and share the speed code.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both speed
		 * and flow-control autoneg are enabled, in which case take
		 * the negotiated result from the status word.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (fibre <-> copper) resets link defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2022
2023 static int
2024 bnx2_set_remote_link(struct bnx2 *bp)
2025 {
2026         u32 evt_code;
2027
2028         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2029         switch (evt_code) {
2030                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2031                         bnx2_remote_phy_event(bp);
2032                         break;
2033                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2034                 default:
2035                         bnx2_send_heart_beat(bp);
2036                         break;
2037         }
2038         return 0;
2039 }
2040
/* Configure the copper PHY from bp->autoneg/advertising or the forced
 * req_line_speed/req_duplex.  Called with bp->phy_lock held; the lock is
 * dropped around the forced link-down delay.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask the current advertisement registers down to the
		 * bits this driver manages so they can be compared with
		 * the freshly computed values.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was disabled in BMCR.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read it twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2139
2140 static int
2141 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2142 __releases(&bp->phy_lock)
2143 __acquires(&bp->phy_lock)
2144 {
2145         if (bp->loopback == MAC_LOOPBACK)
2146                 return 0;
2147
2148         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2149                 return bnx2_setup_serdes_phy(bp, port);
2150         }
2151         else {
2152                 return bnx2_setup_copper_phy(bp);
2153         }
2154 }
2155
/* One-time initialization of the 5709 SerDes PHY.  Always returns 0. */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* On the 5709S the standard IEEE MII registers live at an offset
	 * of 0x10 within the selected register block.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the address expansion register. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode and turn off media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable 2.5G advertisement only on 2.5G-capable PHYs. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing back at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2205
/* One-time initialization of the 5708 SerDes PHY.  Always returns 0. */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* If NVRAM supplies a TX control value and the board is a
	 * backplane design, program it into the TX_MISC block.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2263
/* One-time initialization of the 5706 SerDes PHY.  Always returns 0.
 * Registers 0x18/0x1c are vendor shadow registers; the magic values are
 * presumably from Broadcom reference code -- do not change them.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit (standard MTU). */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2301
/* One-time initialization of the copper PHY.  Always returns 0.
 * Registers 0x15/0x17/0x18 are vendor shadow/DSP registers; the magic
 * sequences are presumably from Broadcom reference code -- do not change.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* DSP writes applied only on parts flagged as needing the CRC fix. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit (standard MTU). */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2353
2354
/* Identify and initialize the PHY (chip-specific routine), then set up
 * the link via bnx2_setup_phy().  Called with bp->phy_lock held; the
 * setup path may drop and reacquire it.  Returns 0 or a negative errno
 * from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register map; chip-specific init may override. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote (firmware-managed) PHYs are not probed over MII. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY id from the two PHYSID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2400
2401 static int
2402 bnx2_set_mac_loopback(struct bnx2 *bp)
2403 {
2404         u32 mac_mode;
2405
2406         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2407         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2408         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2409         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2410         bp->link_up = 1;
2411         return 0;
2412 }
2413
2414 static int bnx2_test_link(struct bnx2 *);
2415
2416 static int
2417 bnx2_set_phy_loopback(struct bnx2 *bp)
2418 {
2419         u32 mac_mode;
2420         int rc, i;
2421
2422         spin_lock_bh(&bp->phy_lock);
2423         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2424                             BMCR_SPEED1000);
2425         spin_unlock_bh(&bp->phy_lock);
2426         if (rc)
2427                 return rc;
2428
2429         for (i = 0; i < 10; i++) {
2430                 if (bnx2_test_link(bp) == 0)
2431                         break;
2432                 msleep(100);
2433         }
2434
2435         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2436         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2437                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2438                       BNX2_EMAC_MODE_25G_MODE);
2439
2440         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2441         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2442         bp->link_up = 1;
2443         return 0;
2444 }
2445
/* Post a command to the firmware mailbox, tagged with an incrementing
 * sequence number, and optionally busy-wait (up to BNX2_FW_ACK_TIME_OUT_MS)
 * for the firmware to echo the sequence back as an acknowledgement.
 *
 * Sleeps, so must not be called with spinlocks held.
 *
 * Returns 0 on success or when no ack was requested, -EBUSY if the
 * firmware did not ack in time, -EIO if it acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* For WAIT0 commands, report success without checking the ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2490
/* Initialize context memory on the 5709, where contexts live in host
 * DMA pages (bp->ctx_blk[]) instead of on-chip memory.
 *
 * Kicks off the chip's context-memory init, waits for it to complete,
 * then programs one host page-table entry per context page, polling for
 * each write request to be consumed.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Start context memory init.  (1 << 12) and the page-size field
         * at bits 16+ are taken as-is from the documented init sequence;
         * their individual meanings are not spelled out here. */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Poll until the MEM_INIT bit self-clears. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Pages are allocated elsewhere; bail if one is missing. */
                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Program the page's DMA address (low word must carry the
                 * VALID bit) and latch the entry with a write request. */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Wait for the chip to consume the write request. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2538
/* Zero the on-chip context memory for all 96 contexts (the non-5709
 * path, where context lives on the chip rather than in host pages).
 *
 * On 5706 A0, virtual context IDs with bit 3 set are remapped to a
 * different physical context ID in the 0x60 range -- presumably a chip
 * erratum workaround; the exact rationale is not documented here.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* A context spans CTX_SIZE / PHY_CTX_SIZE physical pages;
                 * map and clear each.  Note that the += below accumulates
                 * (i << PHY_CTX_SHIFT) across iterations. */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2581
2582 static int
2583 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2584 {
2585         u16 *good_mbuf;
2586         u32 good_mbuf_cnt;
2587         u32 val;
2588
2589         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2590         if (good_mbuf == NULL) {
2591                 pr_err("Failed to allocate memory in %s\n", __func__);
2592                 return -ENOMEM;
2593         }
2594
2595         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2596                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2597
2598         good_mbuf_cnt = 0;
2599
2600         /* Allocate a bunch of mbufs and save the good ones in an array. */
2601         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2602         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2603                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2604                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2605
2606                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2607
2608                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2609
2610                 /* The addresses with Bit 9 set are bad memory blocks. */
2611                 if (!(val & (1 << 9))) {
2612                         good_mbuf[good_mbuf_cnt] = (u16) val;
2613                         good_mbuf_cnt++;
2614                 }
2615
2616                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2617         }
2618
2619         /* Free the good ones back to the mbuf pool thus discarding
2620          * all the bad ones. */
2621         while (good_mbuf_cnt) {
2622                 good_mbuf_cnt--;
2623
2624                 val = good_mbuf[good_mbuf_cnt];
2625                 val = (val << 9) | val | 1;
2626
2627                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2628         }
2629         kfree(good_mbuf);
2630         return 0;
2631 }
2632
2633 static void
2634 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2635 {
2636         u32 val;
2637
2638         val = (mac_addr[0] << 8) | mac_addr[1];
2639
2640         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2641
2642         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2643                 (mac_addr[4] << 8) | mac_addr[5];
2644
2645         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2646 }
2647
2648 static inline int
2649 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2650 {
2651         dma_addr_t mapping;
2652         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2653         struct rx_bd *rxbd =
2654                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2655         struct page *page = alloc_page(gfp);
2656
2657         if (!page)
2658                 return -ENOMEM;
2659         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2660                                PCI_DMA_FROMDEVICE);
2661         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2662                 __free_page(page);
2663                 return -EIO;
2664         }
2665
2666         rx_pg->page = page;
2667         dma_unmap_addr_set(rx_pg, mapping, mapping);
2668         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2669         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2670         return 0;
2671 }
2672
2673 static void
2674 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2675 {
2676         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2677         struct page *page = rx_pg->page;
2678
2679         if (!page)
2680                 return;
2681
2682         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2683                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2684
2685         __free_page(page);
2686         rx_pg->page = NULL;
2687 }
2688
2689 static inline int
2690 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2691 {
2692         struct sk_buff *skb;
2693         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2694         dma_addr_t mapping;
2695         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2696         unsigned long align;
2697
2698         skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2699         if (skb == NULL) {
2700                 return -ENOMEM;
2701         }
2702
2703         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2704                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2705
2706         mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2707                                  PCI_DMA_FROMDEVICE);
2708         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2709                 dev_kfree_skb(skb);
2710                 return -EIO;
2711         }
2712
2713         rx_buf->skb = skb;
2714         rx_buf->desc = (struct l2_fhdr *) skb->data;
2715         dma_unmap_addr_set(rx_buf, mapping, mapping);
2716
2717         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2718         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2719
2720         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2721
2722         return 0;
2723 }
2724
2725 static int
2726 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2727 {
2728         struct status_block *sblk = bnapi->status_blk.msi;
2729         u32 new_link_state, old_link_state;
2730         int is_set = 1;
2731
2732         new_link_state = sblk->status_attn_bits & event;
2733         old_link_state = sblk->status_attn_bits_ack & event;
2734         if (new_link_state != old_link_state) {
2735                 if (new_link_state)
2736                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2737                 else
2738                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2739         } else
2740                 is_set = 0;
2741
2742         return is_set;
2743 }
2744
/* Service PHY attention events under phy_lock.
 * bnx2_phy_event_is_set() both tests and acknowledges each event:
 * a link-state change re-evaluates the link, a timer abort refreshes
 * the remotely-managed link state.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2758
/* Read the current TX consumer index from the status block.
 *
 * The status block is updated by the hardware via DMA; the barriers
 * keep the compiler from caching its fields across reads.  An index
 * whose low bits are all ones lands on the last entry of a ring page
 * (used for ring chaining, not a real BD) and is stepped past it.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_tx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
                cons++;
        return cons;
}
2772
/* Reclaim completed TX buffers for this NAPI instance's TX ring.
 *
 * Walks from the software consumer index to the hardware consumer
 * index, unmapping and freeing each completed skb, then wakes the TX
 * queue if it was stopped and enough descriptors have become free.
 *
 * Returns the number of packets reclaimed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        struct netdev_queue *txq;

        /* One netdev TX queue per NAPI instance. */
        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                /* partial BD completions possible with TSO packets */
                if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;

                        /* Index of the packet's last BD; if it crosses a
                         * ring-page boundary, account for the extra
                         * chain-pointer slot. */
                        last_idx = sw_cons + tx_buf->nr_frags + 1;
                        last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Signed 16-bit difference handles index wrap:
                         * stop if the final BD has not completed yet. */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;

                /* Unmap each page fragment's BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        dma_unmap_page(&bp->pdev->dev,
                                dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Refresh hw_cons in case more packets completed while
                 * we were reclaiming. */
                if (hw_cons == sw_cons)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                /* Re-check under the TX lock to avoid racing with a
                 * concurrent bnx2_start_xmit() stopping the queue. */
                __netif_tx_lock(txq, smp_processor_id());
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
2863
/* Recycle @count consumed RX page-ring entries back to the producer
 * side without allocating new pages.
 *
 * When @skb is non-NULL, the caller failed to allocate a replacement
 * for the last page it attached to the skb's frags: that page is
 * detached from the skb, put back into the current cons slot, and the
 * skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        int i;
        u16 hw_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        cons_rx_pg = &rxr->rx_pg_ring[cons];

        /* The caller was unable to allocate a new page to replace the
         * last one in the frags array, so we need to recycle that page
         * and then free the skb.
         */
        if (skb) {
                struct page *page;
                struct skb_shared_info *shinfo;

                shinfo = skb_shinfo(skb);
                shinfo->nr_frags--;
                page = shinfo->frags[shinfo->nr_frags].page;
                shinfo->frags[shinfo->nr_frags].page = NULL;

                cons_rx_pg->page = page;
                dev_kfree_skb(skb);
        }

        hw_prod = rxr->rx_pg_prod;

        /* Move each consumed page (and its DMA mapping and descriptor
         * address) from the cons slot to the next prod slot. */
        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                /* When prod == cons the entry is already in place. */
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        dma_unmap_addr_set(prod_rx_pg, mapping,
                                dma_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2919
/* Recycle an RX buffer: move @skb and its DMA mapping from the @cons
 * slot to the @prod slot so the same buffer can be received into
 * again.  Used when a replacement buffer could not be allocated or the
 * packet was copied out of the buffer.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Hand the header area (synced for the CPU by the RX path)
         * back to the device. */
        dma_sync_single_for_device(&bp->pdev->dev,
                dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;
        prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

        /* Same slot: mapping and descriptor address already correct. */
        if (cons == prod)
                return;

        dma_unmap_addr_set(prod_rx_buf, mapping,
                        dma_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2950
/* Finish receiving a packet into @skb: replace the consumed RX buffer,
 * then (for split-header / jumbo packets) attach the tail held in the
 * page ring as page fragments.
 *
 * @len:      packet length reported by the hardware (caller has already
 *            subtracted the 4-byte CRC; it is added back as raw_len)
 * @hdr_len:  linear header length for split packets, 0 for ordinary ones
 * @dma_addr: DMA address of the consumed RX buffer
 * @ring_idx: packed indices, (cons << 16) | prod
 *
 * On failure, the consumed buffer and any pages are recycled and an
 * errno is returned; the caller must drop the packet.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        /* Put a fresh buffer in the prod slot before taking this one. */
        err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* Also recycle the pages holding the tail. */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Entire packet is in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                /* Tail bytes beyond the header, including the CRC. */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        dma_addr_t mapping_old;

                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        /* Only the CRC (<= 4 bytes) remains: trim it off
                         * whatever holds it (linear area or prior frag)
                         * and recycle the untouched pages. */
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
                        mapping_old = dma_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod),
                                                 GFP_ATOMIC);
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        dma_unmap_page(&bp->pdev->dev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
3050
/* Read the current RX consumer index from the status block.
 *
 * Mirrors bnx2_get_hw_tx_cons(): barriers prevent the compiler from
 * caching the DMA-updated status block, and an index landing on the
 * last (chain-pointer) entry of a ring page is stepped past it.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_rx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
}
3064
/* NAPI RX handler: process up to @budget received packets from this
 * NAPI instance's RX ring.
 *
 * For each completed buffer: validate the hardware status, either copy
 * small packets into a fresh skb or hand the buffer up via
 * bnx2_rx_skb(), apply VLAN/checksum/RX-hash metadata, and pass the
 * skb to GRO.  Finally publish the new producer indices to the chip.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;
                prefetchw(skb);

                /* Warm the cache with the next entry's l2_fhdr. */
                next_rx_buf =
                        &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
                prefetch(next_rx_buf->desc);

                rx_buf->skb = NULL;

                dma_addr = dma_unmap_addr(rx_buf, mapping);

                /* Only the frame header area is synced here; the rest is
                 * unmapped later if the buffer is handed up. */
                dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                rx_hdr = rx_buf->desc;
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;

                /* Split-header or jumbo packets keep their tail in the
                 * page ring; hdr_len is the linear portion. */
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Drop frames with hardware-reported errors, recycling
                 * the buffer (and any tail pages) in place. */
                if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
                                       L2_FHDR_ERRORS_PHY_DECODE |
                                       L2_FHDR_ERRORS_ALIGNMENT |
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {

                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;

                                pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                        }
                        goto next_rx;
                }

                /* Strip the 4-byte CRC. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        /* Small packet: copy into a fresh skb and recycle
                         * the original buffer. */
                        new_skb = netdev_alloc_skb(bp->dev, len + 6);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 6,
                                      new_skb->data, len + 6);
                        skb_reserve(new_skb, 6);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless they carry a VLAN
                 * ethertype (0x8100), which accounts for the extra tag. */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Trust the hardware checksum only when no TCP/UDP
                 * checksum error was flagged. */
                skb_checksum_none_assert(skb);
                if ((bp->dev->features & NETIF_F_RXCSUM) &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                if ((bp->dev->features & NETIF_F_RXHASH) &&
                    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
                     L2_FHDR_STATUS_USE_RXHASH))
                        skb->rxhash = rx_hdr->l2_fhdr_hash;

                skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
                napi_gro_receive(&bnapi->napi, skb);
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        /* Publish the new producer indices and byte sequence to the
         * chip; mmiowb() orders the writes before any following lock
         * release on other CPUs. */
        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
3225
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 *
 * Masks further interrupts via the ack command register, then hands
 * the work to NAPI unless interrupts are logically disabled
 * (bp->intr_sem non-zero, e.g. during reset).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3248
/* One-shot MSI ISR: unlike bnx2_msi(), no mask write is needed here --
 * presumably the one-shot hardware mode auto-masks until re-armed
 * (TODO confirm against the 1shot setup code elsewhere in the driver).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3265
/* INTx (legacy, possibly shared) ISR.
 *
 * Returns IRQ_NONE when the interrupt was not ours (no new status
 * block index and INTA not asserted), IRQ_HANDLED otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Record the status index we saw so re-arm logic can detect
         * new status block updates, then schedule NAPI. */
        if (napi_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
3304
3305 static inline int
3306 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3307 {
3308         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3309         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3310
3311         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3312             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3313                 return 1;
3314         return 0;
3315 }
3316
3317 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3318                                  STATUS_ATTN_BITS_TIMER_ABORT)
3319
3320 static inline int
3321 bnx2_has_work(struct bnx2_napi *bnapi)
3322 {
3323         struct status_block *sblk = bnapi->status_blk.msi;
3324
3325         if (bnx2_has_fast_work(bnapi))
3326                 return 1;
3327
3328 #ifdef BCM_CNIC
3329         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3330                 return 1;
3331 #endif
3332
3333         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3334             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3335                 return 1;
3336
3337         return 0;
3338 }
3339
/* Timer-driven workaround for lost MSI interrupts: if work is pending
 * but the status index has not moved since the previous idle check,
 * pulse the MSI enable bit and call the MSI handler by hand to restart
 * processing.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to recover if MSI is not in use. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Index stalled with work pending: toggle MSI
			 * enable off/on and simulate the missed interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index so the next check can detect a stall. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3361
#ifdef BCM_CNIC
/* Give the CNIC (offload) driver a chance to process its share of the
 * status block; cnic_tag records how far it has consumed events.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops can be unregistered concurrently; RCU protects the
	 * dereference for the duration of the call.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3378
/* Service link-related attentions: when a link-state or timer-abort
 * attention is set in status_attn_bits but not yet mirrored in
 * status_attn_bits_ack, run the PHY interrupt handler and force an
 * immediate host-coalescing update so transient states during link
 * changes are not missed.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the posted write */
	}
}
3398
3399 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3400                           int work_done, int budget)
3401 {
3402         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3403         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3404
3405         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3406                 bnx2_tx_int(bp, bnapi, 0);
3407
3408         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3409                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3410
3411         return work_done;
3412 }
3413
/* NAPI poll routine for MSI-X vectors: process ring work until either
 * the budget is exhausted or no work remains, then complete NAPI and
 * re-arm the vector by acking the last observed status index.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;	/* stay scheduled; kernel will repoll */

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack the index to re-enable this vector's
			 * interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3440
/* NAPI poll routine for INTx/MSI: services link attentions, ring work
 * and CNIC events until the budget is spent or nothing is pending, then
 * re-enables interrupts.  MSI/MSI-X needs a single ACK write; INTx
 * needs two — the first updates the index with the interrupt still
 * masked, the second unmasks it.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;	/* budget spent; stay scheduled */

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack keeps the line masked while the
			 * index is updated ...
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			/* ... second ack unmasks the interrupt. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3489
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the receive filtering (promiscuous / all-multicast /
 * multicast hash / unicast match) registers from dev->flags and the
 * device address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits this function owns
	 * cleared; they are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in-frame when HW stripping is off and the
	 * chip/firmware combination allows it.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 filter bits: the
		 * low CRC byte selects the register (bits 7:5) and the
		 * bit within it (bits 4:0).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More secondary unicast addresses than match-filter slots:
	 * fall back to promiscuous.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program sort mode: clear, write the value, then write it again
	 * with the enable bit set.
	 */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3578
3579 static int __devinit
3580 check_fw_section(const struct firmware *fw,
3581                  const struct bnx2_fw_file_section *section,
3582                  u32 alignment, bool non_empty)
3583 {
3584         u32 offset = be32_to_cpu(section->offset);
3585         u32 len = be32_to_cpu(section->len);
3586
3587         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3588                 return -EINVAL;
3589         if ((non_empty && len == 0) || len > fw->size - offset ||
3590             len & (alignment - 1))
3591                 return -EINVAL;
3592         return 0;
3593 }
3594
3595 static int __devinit
3596 check_mips_fw_entry(const struct firmware *fw,
3597                     const struct bnx2_mips_fw_file_entry *entry)
3598 {
3599         if (check_fw_section(fw, &entry->text, 4, true) ||
3600             check_fw_section(fw, &entry->data, 4, false) ||
3601             check_fw_section(fw, &entry->rodata, 4, false))
3602                 return -EINVAL;
3603         return 0;
3604 }
3605
3606 static int __devinit
3607 bnx2_request_firmware(struct bnx2 *bp)
3608 {
3609         const char *mips_fw_file, *rv2p_fw_file;
3610         const struct bnx2_mips_fw_file *mips_fw;
3611         const struct bnx2_rv2p_fw_file *rv2p_fw;
3612         int rc;
3613
3614         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3615                 mips_fw_file = FW_MIPS_FILE_09;
3616                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3617                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3618                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3619                 else
3620                         rv2p_fw_file = FW_RV2P_FILE_09;
3621         } else {
3622                 mips_fw_file = FW_MIPS_FILE_06;
3623                 rv2p_fw_file = FW_RV2P_FILE_06;
3624         }
3625
3626         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3627         if (rc) {
3628                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3629                 return rc;
3630         }
3631
3632         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3633         if (rc) {
3634                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3635                 return rc;
3636         }
3637         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3638         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3639         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3640             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3641             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3642             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3643             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3644             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3645                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3646                 return -EINVAL;
3647         }
3648         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3649             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3650             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3651                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3652                 return -EINVAL;
3653         }
3654
3655         return 0;
3656 }
3657
3658 static u32
3659 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3660 {
3661         switch (idx) {
3662         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3663                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3664                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3665                 break;
3666         }
3667         return rv2p_code;
3668 }
3669
/* Download an RV2P firmware image into one of the two RV2P processors.
 * Each 64-bit instruction is written as a HIGH/LOW register pair and
 * committed with a write to the processor's address/command register.
 * Up to 8 fixup locations are then patched (e.g. BD page size) and the
 * processor is left in reset; it is un-stalled later during chip init.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for the target engine. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: two 32-bit words. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction at index i/8. */
		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply fixups: each entry names a word location to patch. */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			/* Rewrite the full 64-bit instruction containing
			 * the fixup word: the preceding word is the HIGH
			 * half, the patched word the LOW half.
			 */
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3729
3730 static int
3731 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3732             const struct bnx2_mips_fw_file_entry *fw_entry)
3733 {
3734         u32 addr, len, file_offset;
3735         __be32 *data;
3736         u32 offset;
3737         u32 val;
3738
3739         /* Halt the CPU. */
3740         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3741         val |= cpu_reg->mode_value_halt;
3742         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3743         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3744
3745         /* Load the Text area. */
3746         addr = be32_to_cpu(fw_entry->text.addr);
3747         len = be32_to_cpu(fw_entry->text.len);
3748         file_offset = be32_to_cpu(fw_entry->text.offset);
3749         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3750
3751         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3752         if (len) {
3753                 int j;
3754
3755                 for (j = 0; j < (len / 4); j++, offset += 4)
3756                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3757         }
3758
3759         /* Load the Data area. */
3760         addr = be32_to_cpu(fw_entry->data.addr);
3761         len = be32_to_cpu(fw_entry->data.len);
3762         file_offset = be32_to_cpu(fw_entry->data.offset);
3763         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3764
3765         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3766         if (len) {
3767                 int j;
3768
3769                 for (j = 0; j < (len / 4); j++, offset += 4)
3770                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3771         }
3772
3773         /* Load the Read-Only area. */
3774         addr = be32_to_cpu(fw_entry->rodata.addr);
3775         len = be32_to_cpu(fw_entry->rodata.len);
3776         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3777         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3778
3779         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3780         if (len) {
3781                 int j;
3782
3783                 for (j = 0; j < (len / 4); j++, offset += 4)
3784                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3785         }
3786
3787         /* Clear the pre-fetch instruction. */
3788         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3789
3790         val = be32_to_cpu(fw_entry->start_addr);
3791         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3792
3793         /* Start the CPU. */
3794         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3795         val &= ~cpu_reg->mode_value_halt;
3796         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3797         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3798
3799         return 0;
3800 }
3801
3802 static int
3803 bnx2_init_cpus(struct bnx2 *bp)
3804 {
3805         const struct bnx2_mips_fw_file *mips_fw =
3806                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3807         const struct bnx2_rv2p_fw_file *rv2p_fw =
3808                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3809         int rc;
3810
3811         /* Initialize the RV2P processor. */
3812         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3813         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3814
3815         /* Initialize the RX Processor. */
3816         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3817         if (rc)
3818                 goto init_cpu_err;
3819
3820         /* Initialize the TX Processor. */
3821         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3822         if (rc)
3823                 goto init_cpu_err;
3824
3825         /* Initialize the TX Patch-up Processor. */
3826         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3827         if (rc)
3828                 goto init_cpu_err;
3829
3830         /* Initialize the Completion Processor. */
3831         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3832         if (rc)
3833                 goto init_cpu_err;
3834
3835         /* Initialize the Command Processor. */
3836         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3837
3838 init_cpu_err:
3839         return rc;
3840 }
3841
/* Move the device between PCI power states.
 *
 * PCI_D0: wake the chip (with the mandatory delay when leaving D3hot)
 * and disable magic/ACPI packet detection.  PCI_D3hot: optionally
 * configure Wake-on-LAN (force a low-speed link on copper, enable
 * magic-packet mode, accept broadcast/multicast, keep EMAC and RPM
 * powered), notify the firmware, then write the D3hot state bits.
 * Returns 0, or -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state field (select D0) and write 1 to
		 * clear any latched PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear latched wake indications and turn off magic
		 * packet mode.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* On copper, renegotiate down to 10/100 for the
			 * low-power WoL link; restore the user settings
			 * afterwards.
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort broadcast and multicast; programmed with
			 * the clear/write/write-enable sequence.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (unless WoL is
		 * entirely unsupported on this board).
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 stay in D0 unless WoL is armed; all other
		 * chips always enter D3hot (state field value 3).
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3979
3980 static int
3981 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3982 {
3983         u32 val;
3984         int j;
3985
3986         /* Request access to the flash interface. */
3987         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3988         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3989                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3990                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3991                         break;
3992
3993                 udelay(5);
3994         }
3995
3996         if (j >= NVRAM_TIMEOUT_COUNT)
3997                 return -EBUSY;
3998
3999         return 0;
4000 }
4001
4002 static int
4003 bnx2_release_nvram_lock(struct bnx2 *bp)
4004 {
4005         int j;
4006         u32 val;
4007
4008         /* Relinquish nvram interface. */
4009         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4010
4011         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4012                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4013                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4014                         break;
4015
4016                 udelay(5);
4017         }
4018
4019         if (j >= NVRAM_TIMEOUT_COUNT)
4020                 return -EBUSY;
4021
4022         return 0;
4023 }
4024
4025
/* Enable writes to the flash device: set the PCI write-enable bit in
 * MISC_CFG and, for flash parts that require it, issue a WREN command
 * and wait for completion.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE, then send the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4054
4055 static void
4056 bnx2_disable_nvram_write(struct bnx2 *bp)
4057 {
4058         u32 val;
4059
4060         val = REG_RD(bp, BNX2_MISC_CFG);
4061         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4062 }
4063
4064
4065 static void
4066 bnx2_enable_nvram_access(struct bnx2 *bp)
4067 {
4068         u32 val;
4069
4070         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4071         /* Enable both bits, even on read. */
4072         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4073                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4074 }
4075
4076 static void
4077 bnx2_disable_nvram_access(struct bnx2 *bp)
4078 {
4079         u32 val;
4080
4081         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4082         /* Disable both bits, even after read. */
4083         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4084                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4085                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4086 }
4087
/* Erase the flash page containing the given offset.  No-op for
 * buffered flash parts.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4127
/* Read one 32-bit word from NVRAM at the given byte offset into
 * ret_val (stored big-endian, as the flash layout expects).
 * cmd_flags carries FIRST/LAST framing bits for multi-word reads.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Store the word big-endian via memcpy to avoid
			 * alignment assumptions on ret_val.
			 */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4171
4172
4173 static int
4174 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4175 {
4176         u32 cmd;
4177         __be32 val32;
4178         int j;
4179
4180         /* Build the command word. */
4181         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4182
4183         /* Calculate an offset of a buffered flash, not needed for 5709. */
4184         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4185                 offset = ((offset / bp->flash_info->page_size) <<
4186                           bp->flash_info->page_bits) +
4187                          (offset % bp->flash_info->page_size);
4188         }
4189
4190         /* Need to clear DONE bit separately. */
4191         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4192
4193         memcpy(&val32, val, 4);
4194
4195         /* Write the data. */
4196         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4197
4198         /* Address of the NVRAM to write to. */
4199         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4200
4201         /* Issue the write command. */
4202         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4203
4204         /* Wait for completion. */
4205         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4206                 udelay(5);
4207
4208                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4209                         break;
4210         }
4211         if (j >= NVRAM_TIMEOUT_COUNT)
4212                 return -EBUSY;
4213
4214         return 0;
4215 }
4216
/* Identify the attached NVRAM/flash part and record it in bp->flash_info.
 *
 * 5709 has a fixed flash interface (flash_5709).  Other chips are matched
 * against flash_table[] using NVM_CFG1: if bit 30 is set the interface was
 * already reconfigured and the backup-strap bits are compared against each
 * entry's config1; otherwise the power-on strap bits are matched and the
 * winning entry's timing values are programmed into the NVM_CFG registers.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: flash type is known, only the size needs probing. */
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to match against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		/* Neither loop found a matching strap configuration. */
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size advertised in shared memory; fall back to
	 * the flash table's total size when shmem reports zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4299
4300 static int
4301 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4302                 int buf_size)
4303 {
4304         int rc = 0;
4305         u32 cmd_flags, offset32, len32, extra;
4306
4307         if (buf_size == 0)
4308                 return 0;
4309
4310         /* Request access to the flash interface. */
4311         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4312                 return rc;
4313
4314         /* Enable access to flash interface */
4315         bnx2_enable_nvram_access(bp);
4316
4317         len32 = buf_size;
4318         offset32 = offset;
4319         extra = 0;
4320
4321         cmd_flags = 0;
4322
4323         if (offset32 & 3) {
4324                 u8 buf[4];
4325                 u32 pre_len;
4326
4327                 offset32 &= ~3;
4328                 pre_len = 4 - (offset & 3);
4329
4330                 if (pre_len >= len32) {
4331                         pre_len = len32;
4332                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4333                                     BNX2_NVM_COMMAND_LAST;
4334                 }
4335                 else {
4336                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4337                 }
4338
4339                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4340
4341                 if (rc)
4342                         return rc;
4343
4344                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4345
4346                 offset32 += 4;
4347                 ret_buf += pre_len;
4348                 len32 -= pre_len;
4349         }
4350         if (len32 & 3) {
4351                 extra = 4 - (len32 & 3);
4352                 len32 = (len32 + 4) & ~3;
4353         }
4354
4355         if (len32 == 4) {
4356                 u8 buf[4];
4357
4358                 if (cmd_flags)
4359                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4360                 else
4361                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4362                                     BNX2_NVM_COMMAND_LAST;
4363
4364                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4365
4366                 memcpy(ret_buf, buf, 4 - extra);
4367         }
4368         else if (len32 > 0) {
4369                 u8 buf[4];
4370
4371                 /* Read the first word. */
4372                 if (cmd_flags)
4373                         cmd_flags = 0;
4374                 else
4375                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4376
4377                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4378
4379                 /* Advance to the next dword. */
4380                 offset32 += 4;
4381                 ret_buf += 4;
4382                 len32 -= 4;
4383
4384                 while (len32 > 4 && rc == 0) {
4385                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4386
4387                         /* Advance to the next dword. */
4388                         offset32 += 4;
4389                         ret_buf += 4;
4390                         len32 -= 4;
4391                 }
4392
4393                 if (rc)
4394                         return rc;
4395
4396                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4397                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4398
4399                 memcpy(ret_buf, buf, 4 - extra);
4400         }
4401
4402         /* Disable access to flash interface */
4403         bnx2_disable_nvram_access(bp);
4404
4405         bnx2_release_nvram_lock(bp);
4406
4407         return rc;
4408 }
4409
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned start/end are handled by reading the surrounding dwords and
 * merging them into an aligned bounce buffer.  For non-buffered flash,
 * each affected page is read in full, erased, and rewritten (old data
 * around the new bytes is restored from the page buffer); buffered flash
 * is written directly.  The NVRAM lock is acquired and released once per
 * page iteration.
 *
 * Returns 0 on success or a negative error (-ENOMEM, -EBUSY, or an error
 * from the lock/read/write helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range to the enclosing dword and
	 * fetch the existing leading bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen to a dword boundary and fetch the existing
	 * trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge old edge bytes and new data into one aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer to hold a whole page
	 * across the erase.  NOTE(review): 264 appears sized for the
	 * largest page_size in flash_table — confirm against the table. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4589
4590 static void
4591 bnx2_init_fw_cap(struct bnx2 *bp)
4592 {
4593         u32 val, sig = 0;
4594
4595         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4596         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4597
4598         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4599                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4600
4601         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4602         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4603                 return;
4604
4605         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4606                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4607                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4608         }
4609
4610         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4611             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4612                 u32 link;
4613
4614                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4615
4616                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4617                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4618                         bp->phy_port = PORT_FIBRE;
4619                 else
4620                         bp->phy_port = PORT_TP;
4621
4622                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4623                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4624         }
4625
4626         if (netif_running(bp->dev) && sig)
4627                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4628 }
4629
/* Map the chip's MSI-X table and PBA through GRC windows 2 and 3.
 * NOTE(review): the SEP_WIN write appears to need to precede the window
 * address writes (separate-window mode enables the per-window registers)
 * — confirm against the NetXtreme II programming documentation before
 * reordering.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4638
/* Perform a coordinated chip reset.
 *
 * Sequence: quiesce DMA (chip-specific), handshake with the firmware
 * (WAIT0 before reset, WAIT1 after), issue the reset (MISC_COMMAND on
 * 5709, PCICFG_MISC_CONFIG on 5706/5708), verify endian configuration,
 * then re-read firmware capabilities and reapply chip-specific
 * workarounds.  @reset_code is folded into the firmware sync messages.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV if
 * the byte-swap check fails, or an error from the firmware handshake.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		/* 5706/5708: stop the DMA engines and host coalescing. */
		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* 5709: disable DMA via the core control register, then
		 * poll (up to 100ms) for no pending PCI transactions. */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through MISC_COMMAND; the read-back flushes
		 * the posted write before the delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* 5706/5708 reset through PCICFG_MISC_CONFIG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the reset may have changed the
	 * remote-PHY port selection. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4763
4764 static int
4765 bnx2_init_chip(struct bnx2 *bp)
4766 {
4767         u32 val, mtu;
4768         int rc, i;
4769
4770         /* Make sure the interrupt is not active. */
4771         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4772
4773         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4774               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4775 #ifdef __BIG_ENDIAN
4776               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4777 #endif
4778               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4779               DMA_READ_CHANS << 12 |
4780               DMA_WRITE_CHANS << 16;
4781
4782         val |= (0x2 << 20) | (1 << 11);
4783
4784         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4785                 val |= (1 << 23);
4786
4787         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4788             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4789                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4790
4791         REG_WR(bp, BNX2_DMA_CONFIG, val);
4792
4793         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4794                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4795                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4796                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4797         }
4798
4799         if (bp->flags & BNX2_FLAG_PCIX) {
4800                 u16 val16;
4801
4802                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4803                                      &val16);
4804                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4805                                       val16 & ~PCI_X_CMD_ERO);
4806         }
4807
4808         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4809                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4810                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4811                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4812
4813         /* Initialize context mapping and zero out the quick contexts.  The
4814          * context block must have already been enabled. */
4815         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4816                 rc = bnx2_init_5709_context(bp);
4817                 if (rc)
4818                         return rc;
4819         } else
4820                 bnx2_init_context(bp);
4821
4822         if ((rc = bnx2_init_cpus(bp)) != 0)
4823                 return rc;
4824
4825         bnx2_init_nvram(bp);
4826
4827         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4828
4829         val = REG_RD(bp, BNX2_MQ_CONFIG);
4830         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4831         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4832         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4833                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4834                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4835                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4836         }
4837
4838         REG_WR(bp, BNX2_MQ_CONFIG, val);
4839
4840         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4841         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4842         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4843
4844         val = (BCM_PAGE_BITS - 8) << 24;
4845         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4846
4847         /* Configure page size. */
4848         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4849         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4850         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4851         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4852
4853         val = bp->mac_addr[0] +
4854               (bp->mac_addr[1] << 8) +
4855               (bp->mac_addr[2] << 16) +
4856               bp->mac_addr[3] +
4857               (bp->mac_addr[4] << 8) +
4858               (bp->mac_addr[5] << 16);
4859         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4860
4861         /* Program the MTU.  Also include 4 bytes for CRC32. */
4862         mtu = bp->dev->mtu;
4863         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4864         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4865                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4866         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4867
4868         if (mtu < 1500)
4869                 mtu = 1500;
4870
4871         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4872         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4873         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4874
4875         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4876         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4877                 bp->bnx2_napi[i].last_status_idx = 0;
4878
4879         bp->idle_chk_status_idx = 0xffff;
4880
4881         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4882
4883         /* Set up how to generate a link change interrupt. */
4884         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4885
4886         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4887                (u64) bp->status_blk_mapping & 0xffffffff);
4888         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4889
4890         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4891                (u64) bp->stats_blk_mapping & 0xffffffff);
4892         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4893                (u64) bp->stats_blk_mapping >> 32);
4894
4895         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4896                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4897
4898         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4899                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4900
4901         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4902                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4903
4904         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4905
4906         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4907
4908         REG_WR(bp, BNX2_HC_COM_TICKS,
4909                (bp->com_ticks_int << 16) | bp->com_ticks);
4910
4911         REG_WR(bp, BNX2_HC_CMD_TICKS,
4912                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4913
4914         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4915                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4916         else
4917                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4918         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4919
4920         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4921                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4922         else {
4923                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4924                       BNX2_HC_CONFIG_COLLECT_STATS;
4925         }
4926
4927         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4928                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4929                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4930
4931                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4932         }
4933
4934         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4935                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4936
4937         REG_WR(bp, BNX2_HC_CONFIG, val);
4938
4939         if (bp->rx_ticks < 25)
4940                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4941         else
4942                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4943
4944         for (i = 1; i < bp->irq_nvecs; i++) {
4945                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4946                            BNX2_HC_SB_CONFIG_1;
4947
4948                 REG_WR(bp, base,
4949                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4950                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4951                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4952
4953                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4954                         (bp->tx_quick_cons_trip_int << 16) |
4955                          bp->tx_quick_cons_trip);
4956
4957                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4958                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4959
4960                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4961                        (bp->rx_quick_cons_trip_int << 16) |
4962                         bp->rx_quick_cons_trip);
4963
4964                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4965                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4966         }
4967
4968         /* Clear internal stats counters. */
4969         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4970
4971         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4972
4973         /* Initialize the receive filter. */
4974         bnx2_set_rx_mode(bp->dev);
4975
4976         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4977                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4978                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4979                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4980         }
4981         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4982                           1, 0);
4983
4984         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4985         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4986
4987         udelay(20);
4988
4989         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4990
4991         return rc;
4992 }
4993
4994 static void
4995 bnx2_clear_ring_states(struct bnx2 *bp)
4996 {
4997         struct bnx2_napi *bnapi;
4998         struct bnx2_tx_ring_info *txr;
4999         struct bnx2_rx_ring_info *rxr;
5000         int i;
5001
5002         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5003                 bnapi = &bp->bnx2_napi[i];
5004                 txr = &bnapi->tx_ring;
5005                 rxr = &bnapi->rx_ring;
5006
5007                 txr->tx_cons = 0;
5008                 txr->hw_tx_cons = 0;
5009                 rxr->rx_prod_bseq = 0;
5010                 rxr->rx_prod = 0;
5011                 rxr->rx_cons = 0;
5012                 rxr->rx_pg_prod = 0;
5013                 rxr->rx_pg_cons = 0;
5014         }
5015 }
5016
5017 static void
5018 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5019 {
5020         u32 val, offset0, offset1, offset2, offset3;
5021         u32 cid_addr = GET_CID_ADDR(cid);
5022
5023         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5024                 offset0 = BNX2_L2CTX_TYPE_XI;
5025                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5026                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5027                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5028         } else {
5029                 offset0 = BNX2_L2CTX_TYPE;
5030                 offset1 = BNX2_L2CTX_CMD_TYPE;
5031                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5032                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5033         }
5034         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5035         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5036
5037         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5038         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5039
5040         val = (u64) txr->tx_desc_mapping >> 32;
5041         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5042
5043         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5044         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5045 }
5046
5047 static void
5048 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5049 {
5050         struct tx_bd *txbd;
5051         u32 cid = TX_CID;
5052         struct bnx2_napi *bnapi;
5053         struct bnx2_tx_ring_info *txr;
5054
5055         bnapi = &bp->bnx2_napi[ring_num];
5056         txr = &bnapi->tx_ring;
5057
5058         if (ring_num == 0)
5059                 cid = TX_CID;
5060         else
5061                 cid = TX_TSS_CID + ring_num - 1;
5062
5063         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5064
5065         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5066
5067         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5068         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5069
5070         txr->tx_prod = 0;
5071         txr->tx_prod_bseq = 0;
5072
5073         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5074         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5075
5076         bnx2_init_tx_context(bp, cid, txr);
5077 }
5078
5079 static void
5080 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5081                      int num_rings)
5082 {
5083         int i;
5084         struct rx_bd *rxbd;
5085
5086         for (i = 0; i < num_rings; i++) {
5087                 int j;
5088
5089                 rxbd = &rx_ring[i][0];
5090                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5091                         rxbd->rx_bd_len = buf_size;
5092                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5093                 }
5094                 if (i == (num_rings - 1))
5095                         j = 0;
5096                 else
5097                         j = i + 1;
5098                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5099                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5100         }
5101 }
5102
/* Initialize RX ring @ring_num: build the BD chains, program the RX and
 * (optional) page-ring context, pre-fill the rings with pages/skbs, and
 * write the initial producer indices to the chip's mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional RSS rings follow. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the normal RX BD pages together. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page buffer size 0 disables the page ring unless re-enabled below. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo path: set up the separate page-based RX ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Give the chip the bus address of the first RX BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is logged, not fatal. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring with skbs; likewise best-effort. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Cache the mailbox addresses used to ring the RX doorbells. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5188
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS when there is more than one TX ring. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table, 8 four-bit entries per
		 * 32-bit word; each entry selects one of the non-default
		 * RX rings.  Flush every completed word to the chip.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Hash on both IPv4 and IPv6 flows. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5235
5236 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5237 {
5238         u32 max, num_rings = 1;
5239
5240         while (ring_size > MAX_RX_DESC_CNT) {
5241                 ring_size -= MAX_RX_DESC_CNT;
5242                 num_rings++;
5243         }
5244         /* round to next power of 2 */
5245         max = max_size;
5246         while ((max & num_rings) == 0)
5247                 max >>= 1;
5248
5249         if (num_rings != max)
5250                 max <<= 1;
5251
5252         return max;
5253 }
5254
/* Compute all RX buffer/ring sizing fields in @bp for a ring of @size
 * entries at the current MTU.  Enables the separate page ring for jumbo
 * frames when a single buffer would not fit in one page.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: aligned data + NET_SKB_PAD + shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Jumbo: split each packet across page-ring buffers.
		 * NOTE(review): the "- 40" presumably accounts for fixed
		 * IP/TCP header bytes kept in the head buffer -- confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Head buffer only needs to hold the copy-threshold bytes. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5293
/* Unmap and free every skb still queued on any TX ring.  Used when
 * tearing the rings down (reset/close); no completions are generated.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop: by 1 for empty slots, by
		 * 1 + nr_frags for each skb (head BD plus frag BDs).
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Head BD maps the linear part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Following BDs map the paged fragments. */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5337
/* Unmap and free every skb and page still posted on any RX ring. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): bails out of ALL remaining rings here,
		 * while the TX counterpart uses continue; presumably the
		 * rings are allocated in order so a NULL ring implies no
		 * later rings exist -- confirm before changing.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release any pages posted on the jumbo page ring. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5371
/* Release every buffer posted on all TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5378
5379 static int
5380 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5381 {
5382         int rc;
5383
5384         rc = bnx2_reset_chip(bp, reset_code);
5385         bnx2_free_skbs(bp);
5386         if (rc)
5387                 return rc;
5388
5389         if ((rc = bnx2_init_chip(bp)) != 0)
5390                 return rc;
5391
5392         bnx2_init_all_rings(bp);
5393         return 0;
5394 }
5395
5396 static int
5397 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5398 {
5399         int rc;
5400
5401         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5402                 return rc;
5403
5404         spin_lock_bh(&bp->phy_lock);
5405         bnx2_init_phy(bp, reset_phy);
5406         bnx2_set_link(bp);
5407         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5408                 bnx2_remote_phy_event(bp);
5409         spin_unlock_bh(&bp->phy_lock);
5410         return 0;
5411 }
5412
5413 static int
5414 bnx2_shutdown_chip(struct bnx2 *bp)
5415 {
5416         u32 reset_code;
5417
5418         if (bp->flags & BNX2_FLAG_NO_WOL)
5419                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5420         else if (bp->wol)
5421                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5422         else
5423                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5424
5425         return bnx2_reset_chip(bp, reset_code);
5426 }
5427
/* Register self-test (ethtool): for each table entry, verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * value is restored in all cases.  Returns 0 or -ENODEV on the first
 * failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, rw_mask, ro_mask }, terminated by offset 0xffff.
	 * Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must read back 0, ro bits
		 * must keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back set, ro bits
		 * must again be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5598
5599 static int
5600 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5601 {
5602         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5603                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5604         int i;
5605
5606         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5607                 u32 offset;
5608
5609                 for (offset = 0; offset < size; offset += 4) {
5610
5611                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5612
5613                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5614                                 test_pattern[i]) {
5615                                 return -ENODEV;
5616                         }
5617                 }
5618         }
5619         return 0;
5620 }
5621
5622 static int
5623 bnx2_test_memory(struct bnx2 *bp)
5624 {
5625         int ret = 0;
5626         int i;
5627         static struct mem_entry {
5628                 u32   offset;
5629                 u32   len;
5630         } mem_tbl_5706[] = {
5631                 { 0x60000,  0x4000 },
5632                 { 0xa0000,  0x3000 },
5633                 { 0xe0000,  0x4000 },
5634                 { 0x120000, 0x4000 },
5635                 { 0x1a0000, 0x4000 },
5636                 { 0x160000, 0x4000 },
5637                 { 0xffffffff, 0    },
5638         },
5639         mem_tbl_5709[] = {
5640                 { 0x60000,  0x4000 },
5641                 { 0xa0000,  0x3000 },
5642                 { 0xe0000,  0x4000 },
5643                 { 0x120000, 0x4000 },
5644                 { 0x1a0000, 0x4000 },
5645                 { 0xffffffff, 0    },
5646         };
5647         struct mem_entry *mem_tbl;
5648
5649         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5650                 mem_tbl = mem_tbl_5709;
5651         else
5652                 mem_tbl = mem_tbl_5706;
5653
5654         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5655                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5656                         mem_tbl[i].len)) != 0) {
5657                         return ret;
5658                 }
5659         }
5660
5661         return ret;
5662 }
5663
5664 #define BNX2_MAC_LOOPBACK       0
5665 #define BNX2_PHY_LOOPBACK       1
5666
/* Loopback self-test: transmit one self-addressed test packet in MAC or
 * PHY loopback mode and verify it is received back intact.  Returns 0
 * on success, -ENODEV on a data/completion mismatch, or -EINVAL /
 * -ENOMEM / -EIO on setup failures.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always runs on ring 0. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A remote-managed PHY cannot be put in loopback; treat
		 * as a pass rather than a failure.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our own MAC as destination, zeroed type
	 * area, then an incrementing byte pattern for the payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalescing pass so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single TX BD describing the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so the TX completion and RX arrival are visible. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The packet must have completed transmission... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged with any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length minus 4-byte CRC must match. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5798
5799 #define BNX2_MAC_LOOPBACK_FAILED        1
5800 #define BNX2_PHY_LOOPBACK_FAILED        2
5801 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5802                                          BNX2_PHY_LOOPBACK_FAILED)
5803
5804 static int
5805 bnx2_test_loopback(struct bnx2 *bp)
5806 {
5807         int rc = 0;
5808
5809         if (!netif_running(bp->dev))
5810                 return BNX2_LOOPBACK_FAILED;
5811
5812         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5813         spin_lock_bh(&bp->phy_lock);
5814         bnx2_init_phy(bp, 1);
5815         spin_unlock_bh(&bp->phy_lock);
5816         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5817                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5818         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5819                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5820         return rc;
5821 }
5822
5823 #define NVRAM_SIZE 0x200
5824 #define CRC32_RESIDUAL 0xdebb20e3
5825
5826 static int
5827 bnx2_test_nvram(struct bnx2 *bp)
5828 {
5829         __be32 buf[NVRAM_SIZE / 4];
5830         u8 *data = (u8 *) buf;
5831         int rc = 0;
5832         u32 magic, csum;
5833
5834         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5835                 goto test_nvram_done;
5836
5837         magic = be32_to_cpu(buf[0]);
5838         if (magic != 0x669955aa) {
5839                 rc = -ENODEV;
5840                 goto test_nvram_done;
5841         }
5842
5843         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5844                 goto test_nvram_done;
5845
5846         csum = ether_crc_le(0x100, data);
5847         if (csum != CRC32_RESIDUAL) {
5848                 rc = -ENODEV;
5849                 goto test_nvram_done;
5850         }
5851
5852         csum = ether_crc_le(0x100, data + 0x100);
5853         if (csum != CRC32_RESIDUAL) {
5854                 rc = -ENODEV;
5855         }
5856
5857 test_nvram_done:
5858         return rc;
5859 }
5860
5861 static int
5862 bnx2_test_link(struct bnx2 *bp)
5863 {
5864         u32 bmsr;
5865
5866         if (!netif_running(bp->dev))
5867                 return -ENODEV;
5868
5869         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5870                 if (bp->link_up)
5871                         return 0;
5872                 return -ENODEV;
5873         }
5874         spin_lock_bh(&bp->phy_lock);
5875         bnx2_enable_bmsr1(bp);
5876         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5877         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5878         bnx2_disable_bmsr1(bp);
5879         spin_unlock_bh(&bp->phy_lock);
5880
5881         if (bmsr & BMSR_LSTATUS) {
5882                 return 0;
5883         }
5884         return -ENODEV;
5885 }
5886
5887 static int
5888 bnx2_test_intr(struct bnx2 *bp)
5889 {
5890         int i;
5891         u16 status_idx;
5892
5893         if (!netif_running(bp->dev))
5894                 return -ENODEV;
5895
5896         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5897
5898         /* This register is not touched during run-time. */
5899         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5900         REG_RD(bp, BNX2_HC_COMMAND);
5901
5902         for (i = 0; i < 10; i++) {
5903                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5904                         status_idx) {
5905
5906                         break;
5907                 }
5908
5909                 msleep_interruptible(10);
5910         }
5911         if (i < 10)
5912                 return 0;
5913
5914         return -ENODEV;
5915 }
5916
/* Determining link for parallel detection.
 *
 * Returns 1 when the 5706 SerDes sees a signal that looks like a forced
 * (non-autoneg) 1G link partner, 0 otherwise.  Each check is a PHY
 * shadow-register access; the register writes select the shadow page
 * before the read, so the access order must not change.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled on this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detected on the wire: no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice — second read reflects current state (bits are
	 * presumably latched; TODO confirm against PHY docs).
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Out of sync or invalid RUDI: the signal is not a usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5948
/* Periodic link maintenance for the 5706 SerDes: implements parallel
 * detection (fall back to forced 1G when the partner does not autoneg)
 * and forces the link down on loss of sync.  Runs from bnx2_timer().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Still waiting out a previous autoneg restart; skip the
		 * link check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not produced a link; if the wire
			 * looks like a forced-1G partner, switch to forced
			 * 1000/full (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection.  Registers 0x17 /
		 * 0x15 and bit 0x20 are undocumented PHY shadow accesses —
		 * presumably they detect that the partner now runs autoneg,
		 * in which case re-enable autoneg; TODO confirm against
		 * the PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Re-sample the AN debug register (double read — second
		 * read is the current state).  On loss of sync, force the
		 * link down once, then let bnx2_set_link() re-evaluate;
		 * when sync returns, re-evaluate immediately.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6010
/* Periodic link maintenance for the 5708 SerDes: while autoneg fails
 * to bring the link up on a 2.5G-capable part, alternate between
 * forced 2.5G and autoneg.  Runs from bnx2_timer().
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Remotely managed PHY: link is handled elsewhere. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	/* Non-2.5G parts need no forced-speed fallback. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still giving the current mode time to link up. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed; try forced 2.5G for one
			 * (shorter) forced-timeout interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G failed too; go back to autoneg and
			 * give it two timer ticks before retrying.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6043
/* Periodic device timer (bp->timer callback): firmware heartbeat,
 * statistics refresh, missed-MSI workaround, and SerDes link
 * maintenance.  Rearms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupt processing is gated off (e.g. around a reset);
	 * skip the work but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot MSI) needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Firmware-side rx drop count lives in indirect register space. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6079
6080 static int
6081 bnx2_request_irq(struct bnx2 *bp)
6082 {
6083         unsigned long flags;
6084         struct bnx2_irq *irq;
6085         int rc = 0, i;
6086
6087         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6088                 flags = 0;
6089         else
6090                 flags = IRQF_SHARED;
6091
6092         for (i = 0; i < bp->irq_nvecs; i++) {
6093                 irq = &bp->irq_tbl[i];
6094                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6095                                  &bp->bnx2_napi[i]);
6096                 if (rc)
6097                         break;
6098                 irq->requested = 1;
6099         }
6100         return rc;
6101 }
6102
6103 static void
6104 __bnx2_free_irq(struct bnx2 *bp)
6105 {
6106         struct bnx2_irq *irq;
6107         int i;
6108
6109         for (i = 0; i < bp->irq_nvecs; i++) {
6110                 irq = &bp->irq_tbl[i];
6111                 if (irq->requested)
6112                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6113                 irq->requested = 0;
6114         }
6115 }
6116
6117 static void
6118 bnx2_free_irq(struct bnx2 *bp)
6119 {
6120
6121         __bnx2_free_irq(bp);
6122         if (bp->flags & BNX2_FLAG_USING_MSI)
6123                 pci_disable_msi(bp->pdev);
6124         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6125                 pci_disable_msix(bp->pdev);
6126
6127         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6128 }
6129
6130 static void
6131 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6132 {
6133         int i, total_vecs, rc;
6134         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6135         struct net_device *dev = bp->dev;
6136         const int len = sizeof(bp->irq_tbl[0].name);
6137
6138         bnx2_setup_msix_tbl(bp);
6139         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6140         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6141         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6142
6143         /*  Need to flush the previous three writes to ensure MSI-X
6144          *  is setup properly */
6145         REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6146
6147         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6148                 msix_ent[i].entry = i;
6149                 msix_ent[i].vector = 0;
6150         }
6151
6152         total_vecs = msix_vecs;
6153 #ifdef BCM_CNIC
6154         total_vecs++;
6155 #endif
6156         rc = -ENOSPC;
6157         while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6158                 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6159                 if (rc <= 0)
6160                         break;
6161                 if (rc > 0)
6162                         total_vecs = rc;
6163         }
6164
6165         if (rc != 0)
6166                 return;
6167
6168         msix_vecs = total_vecs;
6169 #ifdef BCM_CNIC
6170         msix_vecs--;
6171 #endif
6172         bp->irq_nvecs = msix_vecs;
6173         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6174         for (i = 0; i < total_vecs; i++) {
6175                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6176                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6177                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6178         }
6179 }
6180
6181 static int
6182 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6183 {
6184         int cpus = num_online_cpus();
6185         int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6186
6187         bp->irq_tbl[0].handler = bnx2_interrupt;
6188         strcpy(bp->irq_tbl[0].name, bp->dev->name);
6189         bp->irq_nvecs = 1;
6190         bp->irq_tbl[0].vector = bp->pdev->irq;
6191
6192         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6193                 bnx2_enable_msix(bp, msix_vecs);
6194
6195         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6196             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6197                 if (pci_enable_msi(bp->pdev) == 0) {
6198                         bp->flags |= BNX2_FLAG_USING_MSI;
6199                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6200                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6201                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6202                         } else
6203                                 bp->irq_tbl[0].handler = bnx2_msi;
6204
6205                         bp->irq_tbl[0].vector = bp->pdev->irq;
6206                 }
6207         }
6208
6209         bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6210         netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6211
6212         bp->num_rx_rings = bp->irq_nvecs;
6213         return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6214 }
6215
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Pick MSI-X/MSI/INTx and size the rx/tx queues accordingly. */
	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	/* Allow interrupt processing (see bnx2_timer/reset paths). */
	atomic_set(&bp->intr_sem, 0);

	/* Fresh open: clear the saved statistics accumulators. */
	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1: force INTx, then re-init and re-arm. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything; the free/disable helpers tolerate being
	 * called for resources that were never set up.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	return rc;
}
6294
6295 static void
6296 bnx2_reset_task(struct work_struct *work)
6297 {
6298         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6299
6300         rtnl_lock();
6301         if (!netif_running(bp->dev)) {
6302                 rtnl_unlock();
6303                 return;
6304         }
6305
6306         bnx2_netif_stop(bp, true);
6307
6308         bnx2_init_nic(bp, 1);
6309
6310         atomic_set(&bp->intr_sem, 1);
6311         bnx2_netif_start(bp, true);
6312         rtnl_unlock();
6313 }
6314
/* Dump PCI config, MAC, firmware-MCP and host-coalescing state to the
 * log for post-mortem debugging (called from the tx timeout path).
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1, val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	/* The MCP state registers moved between chip generations. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* Pending-bit array is only meaningful under MSI-X. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6347
/* net_device watchdog callback: log debug state, then schedule a full
 * chip reset from process context (bnx2_reset_task).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6358
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The queue should have been stopped before the ring got this
	 * full, so hitting this path indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* Hardware VLAN tag insertion: tag goes in the upper 16 bits. */
	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	/* LSO (TSO) setup: encode the TCP/IP option lengths — and for
	 * IPv6 the transport header offset — into the BD flag and mss
	 * fields for the hardware.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* Offset of TCP header beyond a plain (no
			 * extension headers) IPv6 header.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Non-zero offset is split across several
				 * bit fields of the BD (8-byte units).
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part of the skb; on failure the packet is
	 * silently dropped (TX_OK) per stack convention.
	 */
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD carries the header flags; START set here, END is set
	 * on the last BD after the frag loop.
	 */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
				       len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	/* Drop the packet; returning TX_OK avoids a requeue loop. */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6531
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no reset work runs concurrently with teardown. */
	cancel_work_sync(&bp->reset_task);

	/* Quiesce interrupts, NAPI and the timer before touching hw. */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Put the device into low-power state until next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6553
6554 static void
6555 bnx2_save_stats(struct bnx2 *bp)
6556 {
6557         u32 *hw_stats = (u32 *) bp->stats_blk;
6558         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6559         int i;
6560
6561         /* The 1st 10 counters are 64-bit counters */
6562         for (i = 0; i < 20; i += 2) {
6563                 u32 hi;
6564                 u64 lo;
6565
6566                 hi = temp_stats[i] + hw_stats[i];
6567                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6568                 if (lo > 0xffffffff)
6569                         hi++;
6570                 temp_stats[i] = hi;
6571                 temp_stats[i + 1] = lo & 0xffffffff;
6572         }
6573
6574         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6575                 temp_stats[i] += hw_stats[i];
6576 }
6577
6578 #define GET_64BIT_NET_STATS64(ctr)              \
6579         (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6580
6581 #define GET_64BIT_NET_STATS(ctr)                                \
6582         GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
6583         GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6584
6585 #define GET_32BIT_NET_STATS(ctr)                                \
6586         (unsigned long) (bp->stats_blk->ctr +                   \
6587                          bp->temp_stats_blk->ctr)
6588
/* ndo_get_stats64: fill net_stats from the live hardware statistics
 * block plus the accumulated totals in temp_stats_blk (the GET_*_NET_STATS
 * macros sum both).  Always returns net_stats.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* No statistics block available; leave net_stats untouched. */
	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	/* "Overrsize" is the field's actual (misspelled) name in the
	 * statistics block — do not "correct" it here.
	 */
	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are not reported for 5706 and 5708 A0
	 * (presumably the counter is unreliable on those chips —
	 * TODO confirm).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6661
6662 /* All ethtool functions called with rtnl_lock */
6663
/* ethtool get_settings handler: report supported/advertised modes and
 * the current port, speed and duplex.  Remote-PHY capable devices can
 * switch between serdes and copper, so both mode sets are reported
 * for them.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock keeps port/advertising/speed/duplex mutually
	 * consistent while they are sampled.
	 */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, bp->line_speed);
		cmd->duplex = bp->duplex;
	}
	else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6721
/* ethtool set_settings handler: validate and store the requested
 * port/autoneg/speed/duplex, then reprogram the PHY if the device is
 * running.  Returns 0 on success or -EINVAL for unsupported
 * combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only remote-PHY capable devices can switch port types. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertised modes to the chosen medium,
		 * falling back to everything that medium supports.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		/* Forced fibre links accept only 1G/2.5G full duplex
		 * (2.5G only when the PHY supports it); forced copper
		 * links reject 1G/2.5G.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6799
6800 static void
6801 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6802 {
6803         struct bnx2 *bp = netdev_priv(dev);
6804
6805         strcpy(info->driver, DRV_MODULE_NAME);
6806         strcpy(info->version, DRV_MODULE_VERSION);
6807         strcpy(info->bus_info, pci_name(bp->pdev));
6808         strcpy(info->fw_version, bp->fw_version);
6809 }
6810
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: the dump size is fixed. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6818
/* ethtool get_regs handler: dump a 32KB window of chip registers into
 * _p.  reg_boundaries[] holds alternating [start, end) pairs of
 * readable register ranges; offsets in the gaps between ranges are
 * skipped and left zeroed in the output.  The final 0x8000 entry
 * (== BNX2_REGDUMP_LEN) terminates the walk.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	/* Zero everything first so the skipped ranges read as 0. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers are only dumped while the device is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* NOTE: word-pointer arithmetic, but harmless since
	 * reg_boundaries[0] is 0; later jumps recompute the byte
	 * address from orig_p.
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this readable range: jump to the next one. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6870
6871 static void
6872 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6873 {
6874         struct bnx2 *bp = netdev_priv(dev);
6875
6876         if (bp->flags & BNX2_FLAG_NO_WOL) {
6877                 wol->supported = 0;
6878                 wol->wolopts = 0;
6879         }
6880         else {
6881                 wol->supported = WAKE_MAGIC;
6882                 if (bp->wol)
6883                         wol->wolopts = WAKE_MAGIC;
6884                 else
6885                         wol->wolopts = 0;
6886         }
6887         memset(&wol->sopass, 0, sizeof(wol->sopass));
6888 }
6889
6890 static int
6891 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6892 {
6893         struct bnx2 *bp = netdev_priv(dev);
6894
6895         if (wol->wolopts & ~WAKE_MAGIC)
6896                 return -EINVAL;
6897
6898         if (wol->wolopts & WAKE_MAGIC) {
6899                 if (bp->flags & BNX2_FLAG_NO_WOL)
6900                         return -EINVAL;
6901
6902                 bp->wol = 1;
6903         }
6904         else {
6905                 bp->wol = 0;
6906         }
6907         return 0;
6908 }
6909
/* ethtool nway_reset handler: restart link autonegotiation.
 * Returns -EAGAIN if the device is down, -EINVAL if autoneg is not
 * enabled, otherwise 0 (or the remote-PHY setup result).
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices renegotiate via the firmware interface. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock so we can sleep; 20ms is presumably
		 * enough for the partner to see the forced drop —
		 * TODO(review): confirm against serdes link timing.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear the loopback bit and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6955
6956 static u32
6957 bnx2_get_link(struct net_device *dev)
6958 {
6959         struct bnx2 *bp = netdev_priv(dev);
6960
6961         return bp->link_up;
6962 }
6963
6964 static int
6965 bnx2_get_eeprom_len(struct net_device *dev)
6966 {
6967         struct bnx2 *bp = netdev_priv(dev);
6968
6969         if (bp->flash_info == NULL)
6970                 return 0;
6971
6972         return (int) bp->flash_size;
6973 }
6974
6975 static int
6976 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6977                 u8 *eebuf)
6978 {
6979         struct bnx2 *bp = netdev_priv(dev);
6980         int rc;
6981
6982         if (!netif_running(dev))
6983                 return -EAGAIN;
6984
6985         /* parameters already validated in ethtool_get_eeprom */
6986
6987         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6988
6989         return rc;
6990 }
6991
6992 static int
6993 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6994                 u8 *eebuf)
6995 {
6996         struct bnx2 *bp = netdev_priv(dev);
6997         int rc;
6998
6999         if (!netif_running(dev))
7000                 return -EAGAIN;
7001
7002         /* parameters already validated in ethtool_set_eeprom */
7003
7004         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7005
7006         return rc;
7007 }
7008
7009 static int
7010 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7011 {
7012         struct bnx2 *bp = netdev_priv(dev);
7013
7014         memset(coal, 0, sizeof(struct ethtool_coalesce));
7015
7016         coal->rx_coalesce_usecs = bp->rx_ticks;
7017         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7018         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7019         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7020
7021         coal->tx_coalesce_usecs = bp->tx_ticks;
7022         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7023         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7024         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7025
7026         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7027
7028         return 0;
7029 }
7030
7031 static int
7032 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7033 {
7034         struct bnx2 *bp = netdev_priv(dev);
7035
7036         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7037         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7038
7039         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7040         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7041
7042         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7043         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7044
7045         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7046         if (bp->rx_quick_cons_trip_int > 0xff)
7047                 bp->rx_quick_cons_trip_int = 0xff;
7048
7049         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7050         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7051
7052         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7053         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7054
7055         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7056         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7057
7058         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7059         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7060                 0xff;
7061
7062         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7063         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7064                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7065                         bp->stats_ticks = USEC_PER_SEC;
7066         }
7067         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7068                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7069         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7070
7071         if (netif_running(bp->dev)) {
7072                 bnx2_netif_stop(bp, true);
7073                 bnx2_init_nic(bp, 0);
7074                 bnx2_netif_start(bp, true);
7075         }
7076
7077         return 0;
7078 }
7079
7080 static void
7081 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7082 {
7083         struct bnx2 *bp = netdev_priv(dev);
7084
7085         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7086         ering->rx_mini_max_pending = 0;
7087         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7088
7089         ering->rx_pending = bp->rx_ring_size;
7090         ering->rx_mini_pending = 0;
7091         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7092
7093         ering->tx_max_pending = MAX_TX_DESC_CNT;
7094         ering->tx_pending = bp->tx_ring_size;
7095 }
7096
/* Resize the rx and tx rings.  If the interface is up, the chip is
 * reset and the IRQs, skbs and DMA memory are torn down, then
 * everything is re-allocated and the NIC re-initialized with the new
 * sizes.  On re-init failure the device is closed and a negative
 * errno returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* napi was disabled by bnx2_netif_stop() above;
			 * re-enable it before dev_close() — presumably so
			 * the close path's teardown can proceed normally.
			 * NOTE(review): confirm against bnx2_netif_stop().
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7140
7141 static int
7142 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7143 {
7144         struct bnx2 *bp = netdev_priv(dev);
7145         int rc;
7146
7147         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7148                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7149                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7150
7151                 return -EINVAL;
7152         }
7153         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7154         return rc;
7155 }
7156
7157 static void
7158 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7159 {
7160         struct bnx2 *bp = netdev_priv(dev);
7161
7162         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7163         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7164         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7165 }
7166
7167 static int
7168 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7169 {
7170         struct bnx2 *bp = netdev_priv(dev);
7171
7172         bp->req_flow_ctrl = 0;
7173         if (epause->rx_pause)
7174                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7175         if (epause->tx_pause)
7176                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7177
7178         if (epause->autoneg) {
7179                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7180         }
7181         else {
7182                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7183         }
7184
7185         if (netif_running(dev)) {
7186                 spin_lock_bh(&bp->phy_lock);
7187                 bnx2_setup_phy(bp, bp->phy_port);
7188                 spin_unlock_bh(&bp->phy_lock);
7189         }
7190
7191         return 0;
7192 }
7193
/* ethtool statistics names.  The order here defines the index of each
 * counter and must match bnx2_stats_offset_arr and the per-chip
 * length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7245
/* Number of ethtool statistics, derived from the name table so the
 * name, offset and length arrays stay in sync.
 */
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))

/* Convert a statistics_block field offset into a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the hardware stats block, in the same
 * order as bnx2_stats_str_arr.  64-bit counters point at their _hi
 * word; the _lo word follows at the next index.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7300
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Byte width of each counter (8, 4, or 0 = skipped) for 5706 A0-A2
 * and 5708 A0 (see the chip check in bnx2_get_ethtool_stats),
 * indexed like bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

/* Same table for all other chips; only stat_IfHCInBadOctets is
 * skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7319
/* Number of ethtool self-tests / result slots in bnx2_self_test(). */
#define BNX2_NUM_TESTS 6

/* Self-test names, in the order their results are written into the
 * buf[] passed to bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7332
7333 static int
7334 bnx2_get_sset_count(struct net_device *dev, int sset)
7335 {
7336         switch (sset) {
7337         case ETH_SS_TEST:
7338                 return BNX2_NUM_TESTS;
7339         case ETH_SS_STATS:
7340                 return BNX2_NUM_STATS;
7341         default:
7342                 return -EOPNOTSUPP;
7343         }
7344 }
7345
/* ethtool self-test handler.  buf[] receives one u64 result per entry
 * in bnx2_tests_str_arr: 0 = pass, non-zero = fail (slot 2 holds the
 * loopback test's raw result).  Offline tests reset the chip, so a
 * running interface is re-initialized afterwards and we wait up to 7
 * seconds for the link to come back.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the chip is powered up before touching it. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diag mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if the
		 * interface was closed while we were testing).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7404
7405 static void
7406 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7407 {
7408         switch (stringset) {
7409         case ETH_SS_STATS:
7410                 memcpy(buf, bnx2_stats_str_arr,
7411                         sizeof(bnx2_stats_str_arr));
7412                 break;
7413         case ETH_SS_TEST:
7414                 memcpy(buf, bnx2_tests_str_arr,
7415                         sizeof(bnx2_tests_str_arr));
7416                 break;
7417         }
7418 }
7419
/* ethtool get_ethtool_stats handler: copy the counters listed in
 * bnx2_stats_offset_arr out of the hardware stats block, adding the
 * values saved in temp_stats_blk from before the last chip reset.
 * The per-chip length arrays mark each counter as 8-byte, 4-byte, or
 * 0 (skipped due to errata on early chips).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chips skip an extra counter (carrier sense errors). */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: stored as a hi/lo pair of u32 words,
		 * hi word first (the offset array points at _hi).
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7466
/* ethtool physical-identify (LED blink) handler.
 * ETHTOOL_ID_ACTIVE saves the LED configuration and switches the LEDs
 * to MAC control; ON/OFF drive the override bits for each blink
 * phase; INACTIVE clears the overrides and restores the saved config.
 * Returning 1 from ACTIVE asks the ethtool core to cycle ON/OFF once
 * per second.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Power up the chip in case the interface is down. */
		bnx2_set_power_state(bp, PCI_D0);

		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force every LED on. */
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
		       BNX2_EMAC_LED_1000MB_OVERRIDE |
		       BNX2_EMAC_LED_100MB_OVERRIDE |
		       BNX2_EMAC_LED_10MB_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		/* Override with everything off. */
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the pre-blink LED configuration. */
		REG_WR(bp, BNX2_EMAC_LED, 0);
		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);

		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}

	return 0;
}
7504
7505 static u32
7506 bnx2_fix_features(struct net_device *dev, u32 features)
7507 {
7508         struct bnx2 *bp = netdev_priv(dev);
7509
7510         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7511                 features |= NETIF_F_HW_VLAN_RX;
7512
7513         return features;
7514 }
7515
7516 static int
7517 bnx2_set_features(struct net_device *dev, u32 features)
7518 {
7519         struct bnx2 *bp = netdev_priv(dev);
7520
7521         /* TSO with VLAN tag won't work with current firmware */
7522         if (features & NETIF_F_HW_VLAN_TX)
7523                 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7524         else
7525                 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7526
7527         if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7528             !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7529             netif_running(dev)) {
7530                 bnx2_netif_stop(bp, false);
7531                 dev->features = features;
7532                 bnx2_set_rx_mode(dev);
7533                 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7534                 bnx2_netif_start(bp, false);
7535                 return 1;
7536         }
7537
7538         return 0;
7539 }
7540
/* ethtool operation table; each entry maps a generic ethtool request to
 * its bnx2 implementation above.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7566
7567 /* Called with rtnl_lock */
7568 static int
7569 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7570 {
7571         struct mii_ioctl_data *data = if_mii(ifr);
7572         struct bnx2 *bp = netdev_priv(dev);
7573         int err;
7574
7575         switch(cmd) {
7576         case SIOCGMIIPHY:
7577                 data->phy_id = bp->phy_addr;
7578
7579                 /* fallthru */
7580         case SIOCGMIIREG: {
7581                 u32 mii_regval;
7582
7583                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7584                         return -EOPNOTSUPP;
7585
7586                 if (!netif_running(dev))
7587                         return -EAGAIN;
7588
7589                 spin_lock_bh(&bp->phy_lock);
7590                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7591                 spin_unlock_bh(&bp->phy_lock);
7592
7593                 data->val_out = mii_regval;
7594
7595                 return err;
7596         }
7597
7598         case SIOCSMIIREG:
7599                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7600                         return -EOPNOTSUPP;
7601
7602                 if (!netif_running(dev))
7603                         return -EAGAIN;
7604
7605                 spin_lock_bh(&bp->phy_lock);
7606                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7607                 spin_unlock_bh(&bp->phy_lock);
7608
7609                 return err;
7610
7611         default:
7612                 /* do nothing */
7613                 break;
7614         }
7615         return -EOPNOTSUPP;
7616 }
7617
7618 /* Called with rtnl_lock */
7619 static int
7620 bnx2_change_mac_addr(struct net_device *dev, void *p)
7621 {
7622         struct sockaddr *addr = p;
7623         struct bnx2 *bp = netdev_priv(dev);
7624
7625         if (!is_valid_ether_addr(addr->sa_data))
7626                 return -EINVAL;
7627
7628         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7629         if (netif_running(dev))
7630                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7631
7632         return 0;
7633 }
7634
7635 /* Called with rtnl_lock */
7636 static int
7637 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7638 {
7639         struct bnx2 *bp = netdev_priv(dev);
7640
7641         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7642                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7643                 return -EINVAL;
7644
7645         dev->mtu = new_mtu;
7646         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7647 }
7648
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke each vector's interrupt handler by hand,
 * with its IRQ line masked, so the stack can make progress when normal
 * interrupt delivery is unavailable (netconsole, kgdboe, ...).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		struct bnx2_irq *irq = &bp->irq_tbl[vec];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[vec]);
		enable_irq(irq->vector);
	}
}
#endif
7665
/* Determine whether this 5709 port uses SerDes (fiber) media by decoding
 * the dual-media control register; sets BNX2_PHY_FLAG_SERDES accordingly.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id "C": leave the flag clear (copper); bond id "S":
	 * SerDes.  Anything else is a dual-media part decoded below.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Prefer the software strap override when it is asserted,
	 * otherwise use the hardware strap value.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-to-media mapping differs between the two PCI
	 * functions of the device; strap values not listed mean copper.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7703
/* Detect the PCI/PCI-X bus mode, speed and width from the chip's
 * miscellaneous status register; records the results in bp->flags and
 * bp->bus_speed_mhz for later reporting by bnx2_bus_string().
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* In PCI-X mode the exact clock speed comes from the
		 * detected-speed field of the clock control register.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: only the 66MHz-enable bit distinguishes
		 * 66 from 33 MHz operation.
		 */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7755
/* Read the PCI Vital Product Data block stored in NVRAM and, on boards
 * whose VPD manufacturer id is "1028" (Dell's PCI vendor id in ASCII),
 * append the vendor firmware version string (keyword V0) to
 * bp->fw_version.  Any parse failure silently leaves fw_version as-is.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* The buffer holds two copies: the raw NVRAM image in the upper
	 * 128 bytes and the byte-swapped result in the lower 128 bytes.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD with each 32-bit word byte-swapped;
	 * undo that swap while copying into the lower half.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only proceed when the manufacturer id field is exactly "1028". */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* Copy the vendor-specific version string (keyword V0). */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	/* Separator; more version components are appended later in probe. */
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7823
7824 static int __devinit
7825 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7826 {
7827         struct bnx2 *bp;
7828         unsigned long mem_len;
7829         int rc, i, j;
7830         u32 reg;
7831         u64 dma_mask, persist_dma_mask;
7832         int err;
7833
7834         SET_NETDEV_DEV(dev, &pdev->dev);
7835         bp = netdev_priv(dev);
7836
7837         bp->flags = 0;
7838         bp->phy_flags = 0;
7839
7840         bp->temp_stats_blk =
7841                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7842
7843         if (bp->temp_stats_blk == NULL) {
7844                 rc = -ENOMEM;
7845                 goto err_out;
7846         }
7847
7848         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7849         rc = pci_enable_device(pdev);
7850         if (rc) {
7851                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7852                 goto err_out;
7853         }
7854
7855         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7856                 dev_err(&pdev->dev,
7857                         "Cannot find PCI device base address, aborting\n");
7858                 rc = -ENODEV;
7859                 goto err_out_disable;
7860         }
7861
7862         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7863         if (rc) {
7864                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7865                 goto err_out_disable;
7866         }
7867
7868         pci_set_master(pdev);
7869
7870         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7871         if (bp->pm_cap == 0) {
7872                 dev_err(&pdev->dev,
7873                         "Cannot find power management capability, aborting\n");
7874                 rc = -EIO;
7875                 goto err_out_release;
7876         }
7877
7878         bp->dev = dev;
7879         bp->pdev = pdev;
7880
7881         spin_lock_init(&bp->phy_lock);
7882         spin_lock_init(&bp->indirect_lock);
7883 #ifdef BCM_CNIC
7884         mutex_init(&bp->cnic_lock);
7885 #endif
7886         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7887
7888         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7889         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7890         dev->mem_end = dev->mem_start + mem_len;
7891         dev->irq = pdev->irq;
7892
7893         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7894
7895         if (!bp->regview) {
7896                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7897                 rc = -ENOMEM;
7898                 goto err_out_release;
7899         }
7900
7901         bnx2_set_power_state(bp, PCI_D0);
7902
7903         /* Configure byte swap and enable write to the reg_window registers.
7904          * Rely on CPU to do target byte swapping on big endian systems
7905          * The chip's target access swapping will not swap all accesses
7906          */
7907         REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7908                    BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7909                    BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7910
7911         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7912
7913         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7914                 if (!pci_is_pcie(pdev)) {
7915                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
7916                         rc = -EIO;
7917                         goto err_out_unmap;
7918                 }
7919                 bp->flags |= BNX2_FLAG_PCIE;
7920                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7921                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7922
7923                 /* AER (Advanced Error Reporting) hooks */
7924                 err = pci_enable_pcie_error_reporting(pdev);
7925                 if (!err)
7926                         bp->flags |= BNX2_FLAG_AER_ENABLED;
7927
7928         } else {
7929                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7930                 if (bp->pcix_cap == 0) {
7931                         dev_err(&pdev->dev,
7932                                 "Cannot find PCIX capability, aborting\n");
7933                         rc = -EIO;
7934                         goto err_out_unmap;
7935                 }
7936                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7937         }
7938
7939         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7940                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7941                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7942         }
7943
7944         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7945                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7946                         bp->flags |= BNX2_FLAG_MSI_CAP;
7947         }
7948
7949         /* 5708 cannot support DMA addresses > 40-bit.  */
7950         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7951                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7952         else
7953                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7954
7955         /* Configure DMA attributes. */
7956         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7957                 dev->features |= NETIF_F_HIGHDMA;
7958                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7959                 if (rc) {
7960                         dev_err(&pdev->dev,
7961                                 "pci_set_consistent_dma_mask failed, aborting\n");
7962                         goto err_out_unmap;
7963                 }
7964         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7965                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7966                 goto err_out_unmap;
7967         }
7968
7969         if (!(bp->flags & BNX2_FLAG_PCIE))
7970                 bnx2_get_pci_speed(bp);
7971
7972         /* 5706A0 may falsely detect SERR and PERR. */
7973         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7974                 reg = REG_RD(bp, PCI_COMMAND);
7975                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7976                 REG_WR(bp, PCI_COMMAND, reg);
7977         }
7978         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7979                 !(bp->flags & BNX2_FLAG_PCIX)) {
7980
7981                 dev_err(&pdev->dev,
7982                         "5706 A1 can only be used in a PCIX bus, aborting\n");
7983                 goto err_out_unmap;
7984         }
7985
7986         bnx2_init_nvram(bp);
7987
7988         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7989
7990         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7991             BNX2_SHM_HDR_SIGNATURE_SIG) {
7992                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7993
7994                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7995         } else
7996                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7997
7998         /* Get the permanent MAC address.  First we need to make sure the
7999          * firmware is actually running.
8000          */
8001         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8002
8003         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8004             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8005                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8006                 rc = -ENODEV;
8007                 goto err_out_unmap;
8008         }
8009
8010         bnx2_read_vpd_fw_ver(bp);
8011
8012         j = strlen(bp->fw_version);
8013         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8014         for (i = 0; i < 3 && j < 24; i++) {
8015                 u8 num, k, skip0;
8016
8017                 if (i == 0) {
8018                         bp->fw_version[j++] = 'b';
8019                         bp->fw_version[j++] = 'c';
8020                         bp->fw_version[j++] = ' ';
8021                 }
8022                 num = (u8) (reg >> (24 - (i * 8)));
8023                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8024                         if (num >= k || !skip0 || k == 1) {
8025                                 bp->fw_version[j++] = (num / k) + '0';
8026                                 skip0 = 0;
8027                         }
8028                 }
8029                 if (i != 2)
8030                         bp->fw_version[j++] = '.';
8031         }
8032         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8033         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8034                 bp->wol = 1;
8035
8036         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8037                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8038
8039                 for (i = 0; i < 30; i++) {
8040                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8041                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8042                                 break;
8043                         msleep(10);
8044                 }
8045         }
8046         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8047         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8048         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8049             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8050                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8051
8052                 if (j < 32)
8053                         bp->fw_version[j++] = ' ';
8054                 for (i = 0; i < 3 && j < 28; i++) {
8055                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8056                         reg = swab32(reg);
8057                         memcpy(&bp->fw_version[j], &reg, 4);
8058                         j += 4;
8059                 }
8060         }
8061
8062         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8063         bp->mac_addr[0] = (u8) (reg >> 8);
8064         bp->mac_addr[1] = (u8) reg;
8065
8066         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8067         bp->mac_addr[2] = (u8) (reg >> 24);
8068         bp->mac_addr[3] = (u8) (reg >> 16);
8069         bp->mac_addr[4] = (u8) (reg >> 8);
8070         bp->mac_addr[5] = (u8) reg;
8071
8072         bp->tx_ring_size = MAX_TX_DESC_CNT;
8073         bnx2_set_rx_ring_size(bp, 255);
8074
8075         bp->tx_quick_cons_trip_int = 2;
8076         bp->tx_quick_cons_trip = 20;
8077         bp->tx_ticks_int = 18;
8078         bp->tx_ticks = 80;
8079
8080         bp->rx_quick_cons_trip_int = 2;
8081         bp->rx_quick_cons_trip = 12;
8082         bp->rx_ticks_int = 18;
8083         bp->rx_ticks = 18;
8084
8085         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8086
8087         bp->current_interval = BNX2_TIMER_INTERVAL;
8088
8089         bp->phy_addr = 1;
8090
8091         /* Disable WOL support if we are running on a SERDES chip. */
8092         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8093                 bnx2_get_5709_media(bp);
8094         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8095                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8096
8097         bp->phy_port = PORT_TP;
8098         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8099                 bp->phy_port = PORT_FIBRE;
8100                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8101                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8102                         bp->flags |= BNX2_FLAG_NO_WOL;
8103                         bp->wol = 0;
8104                 }
8105                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8106                         /* Don't do parallel detect on this board because of
8107                          * some board problems.  The link will not go down
8108                          * if we do parallel detect.
8109                          */
8110                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8111                             pdev->subsystem_device == 0x310c)
8112                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8113                 } else {
8114                         bp->phy_addr = 2;
8115                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8116                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8117                 }
8118         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8119                    CHIP_NUM(bp) == CHIP_NUM_5708)
8120                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8121         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8122                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8123                   CHIP_REV(bp) == CHIP_REV_Bx))
8124                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8125
8126         bnx2_init_fw_cap(bp);
8127
8128         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8129             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8130             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8131             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8132                 bp->flags |= BNX2_FLAG_NO_WOL;
8133                 bp->wol = 0;
8134         }
8135
8136         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8137                 bp->tx_quick_cons_trip_int =
8138                         bp->tx_quick_cons_trip;
8139                 bp->tx_ticks_int = bp->tx_ticks;
8140                 bp->rx_quick_cons_trip_int =
8141                         bp->rx_quick_cons_trip;
8142                 bp->rx_ticks_int = bp->rx_ticks;
8143                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8144                 bp->com_ticks_int = bp->com_ticks;
8145                 bp->cmd_ticks_int = bp->cmd_ticks;
8146         }
8147
8148         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8149          *
8150          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8151          * with byte enables disabled on the unused 32-bit word.  This is legal
8152          * but causes problems on the AMD 8132 which will eventually stop
8153          * responding after a while.
8154          *
8155          * AMD believes this incompatibility is unique to the 5706, and
8156          * prefers to locally disable MSI rather than globally disabling it.
8157          */
8158         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8159                 struct pci_dev *amd_8132 = NULL;
8160
8161                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8162                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8163                                                   amd_8132))) {
8164
8165                         if (amd_8132->revision >= 0x10 &&
8166                             amd_8132->revision <= 0x13) {
8167                                 disable_msi = 1;
8168                                 pci_dev_put(amd_8132);
8169                                 break;
8170                         }
8171                 }
8172         }
8173
8174         bnx2_set_default_link(bp);
8175         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8176
8177         init_timer(&bp->timer);
8178         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8179         bp->timer.data = (unsigned long) bp;
8180         bp->timer.function = bnx2_timer;
8181
8182 #ifdef BCM_CNIC
8183         bp->cnic_eth_dev.max_iscsi_conn =
8184                 bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN);
8185 #endif
8186         pci_save_state(pdev);
8187
8188         return 0;
8189
8190 err_out_unmap:
8191         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8192                 pci_disable_pcie_error_reporting(pdev);
8193                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8194         }
8195
8196         if (bp->regview) {
8197                 iounmap(bp->regview);
8198                 bp->regview = NULL;
8199         }
8200
8201 err_out_release:
8202         pci_release_regions(pdev);
8203
8204 err_out_disable:
8205         pci_disable_device(pdev);
8206         pci_set_drvdata(pdev, NULL);
8207
8208 err_out:
8209         return rc;
8210 }
8211
8212 static char * __devinit
8213 bnx2_bus_string(struct bnx2 *bp, char *str)
8214 {
8215         char *s = str;
8216
8217         if (bp->flags & BNX2_FLAG_PCIE) {
8218                 s += sprintf(s, "PCI Express");
8219         } else {
8220                 s += sprintf(s, "PCI");
8221                 if (bp->flags & BNX2_FLAG_PCIX)
8222                         s += sprintf(s, "-X");
8223                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8224                         s += sprintf(s, " 32-bit");
8225                 else
8226                         s += sprintf(s, " 64-bit");
8227                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8228         }
8229         return str;
8230 }
8231
8232 static void
8233 bnx2_del_napi(struct bnx2 *bp)
8234 {
8235         int i;
8236
8237         for (i = 0; i < bp->irq_nvecs; i++)
8238                 netif_napi_del(&bp->bnx2_napi[i].napi);
8239 }
8240
8241 static void
8242 bnx2_init_napi(struct bnx2 *bp)
8243 {
8244         int i;
8245
8246         for (i = 0; i < bp->irq_nvecs; i++) {
8247                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8248                 int (*poll)(struct napi_struct *, int);
8249
8250                 if (i == 0)
8251                         poll = bnx2_poll;
8252                 else
8253                         poll = bnx2_poll_msix;
8254
8255                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8256                 bnapi->bp = bp;
8257         }
8258 }
8259
/* Netdevice operation table connecting the stack's entry points to the
 * bnx2 implementations above.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8277
/* PCI probe entry point: allocates the netdev, performs board init,
 * loads firmware images, sets up offload features, and registers the
 * device with the network stack.  Returns 0 or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only once, on the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	/* bnx2_init_board() cleans up after itself on failure. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= dev->hw_features;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Undo everything bnx2_init_board() and the firmware request set up. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8357
/* bnx2_remove_one - PCI remove entry point; undoes bnx2_init_one().
 * @pdev: PCI device being removed
 *
 * Teardown order matters: the netdev is unregistered first so the stack
 * stops submitting work, then the timer is quiesced, firmware blobs and
 * the BAR mapping are released, and finally PCI resources are dropped.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* del_timer_sync() waits for a running timer handler to finish. */
	del_timer_sync(&bp->timer);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	/* kfree(NULL) is a no-op, so no guard is needed here. */
	kfree(bp->temp_stats_blk);

	/* Only tear down AER reporting if we enabled it at probe time. */
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8389
/* bnx2_suspend - legacy PCI PM suspend hook.
 * @pdev:  PCI device being suspended
 * @state: target system sleep state
 *
 * Saves PCI config space unconditionally, then — only if the interface
 * is up — quiesces pending work, detaches the device from the stack,
 * shuts down the chip and drops it into the chosen low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Make sure no reset task is still queued before stopping. */
	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8413
/* bnx2_resume - legacy PCI PM resume hook; reverse of bnx2_suspend().
 * @pdev: PCI device being resumed
 *
 * Restores PCI config space; if the interface was up, powers the chip
 * back to D0, re-attaches the netdev and reinitializes the NIC.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8430
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* Permanent failure: no point attempting recovery. */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Quiesce the interface before the bus is reset. */
	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8465
8466 /**
8467  * bnx2_io_slot_reset - called after the pci bus has been reset.
8468  * @pdev: Pointer to PCI device
8469  *
8470  * Restart the card from scratch, as if from a cold-boot.
8471  */
8472 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8473 {
8474         struct net_device *dev = pci_get_drvdata(pdev);
8475         struct bnx2 *bp = netdev_priv(dev);
8476         pci_ers_result_t result;
8477         int err;
8478
8479         rtnl_lock();
8480         if (pci_enable_device(pdev)) {
8481                 dev_err(&pdev->dev,
8482                         "Cannot re-enable PCI device after reset\n");
8483                 result = PCI_ERS_RESULT_DISCONNECT;
8484         } else {
8485                 pci_set_master(pdev);
8486                 pci_restore_state(pdev);
8487                 pci_save_state(pdev);
8488
8489                 if (netif_running(dev)) {
8490                         bnx2_set_power_state(bp, PCI_D0);
8491                         bnx2_init_nic(bp, 1);
8492                 }
8493                 result = PCI_ERS_RESULT_RECOVERED;
8494         }
8495         rtnl_unlock();
8496
8497         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8498                 return result;
8499
8500         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8501         if (err) {
8502                 dev_err(&pdev->dev,
8503                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8504                          err); /* non-fatal, continue */
8505         }
8506
8507         return result;
8508 }
8509
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart the NIC only if it was running before the error. */
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8529
/* PCI error-recovery callbacks (AER): detect, slot reset, resume. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8535
/* PCI driver descriptor tying probe/remove/PM/error handlers together. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8545
/* Module init: register the PCI driver; probing happens per device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8550
/* Module exit: unregister the driver; remove is called per device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8555
/* Hook module load/unload to the init/cleanup routines above. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8558
8559
8560