/* l2tp: Split pppol2tp patch into separate l2tp and ppp parts
 * [pandora-kernel.git] / drivers / net / bnx2.c
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
/* Driver identification and firmware image names. */
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.0.8"
#define DRV_MODULE_RELDATE      "Feb 15, 2010"
/* Firmware files loaded via the firmware loader; the MIPS images run on
 * the on-chip processors, the RV2P images program the rx processor.
 */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

/* Convert a relative delay to an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board index: used as the driver_data value in bnx2_pci_tbl below and
 * as the index into board_info[] for the human-readable name.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
        BCM5716,
        BCM5716S,
} board_t;
105
/* Board names, indexed by board_t above — keep the two lists in sync. */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5716 1000Base-T" },
        { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
        };
122
/* PCI IDs handled by this driver.  The HP OEM entries (with a specific
 * subvendor/subdevice) must precede the PCI_ANY_ID catch-all entries for
 * the same device ID so that they match first.  The last field of each
 * entry is the board_t index used to look up the name in board_info[].
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        /* 5716/5716S device IDs used numerically (no PCI_DEVICE_ID macro) */
        { PCI_VENDOR_ID_BROADCOM, 0x163b,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
        { PCI_VENDOR_ID_BROADCOM, 0x163c,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
        { 0, }
};
148
/* Table of supported NVRAM/flash parts.  Each positional initializer
 * supplies the strapping match value, controller configuration words,
 * access flags and device geometry for one part — the field order
 * follows struct flash_spec (see bnx2.h); TODO confirm field names
 * against the header when editing.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
237
/* The 5709 family has a single known flash layout, so it gets a fixed
 * spec instead of being probed against flash_table[].
 */
static const struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250 {
251         u32 diff;
252
253         smp_mb();
254
255         /* The ring uses 256 indices for 255 entries, one of them
256          * needs to be skipped.
257          */
258         diff = txr->tx_prod - txr->tx_cons;
259         if (unlikely(diff >= TX_DESC_CNT)) {
260                 diff &= 0xffff;
261                 if (diff == TX_DESC_CNT)
262                         diff = MAX_TX_DESC_CNT;
263         }
264         return (bp->tx_ring_size - diff);
265 }
266
/* Read a device register indirectly through the PCI config window.
 *
 * The window address/data register pair is a shared resource, so the
 * address write and data read must happen atomically with respect to
 * other users — hence indirect_lock (BH-safe).
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        u32 val;

        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_bh(&bp->indirect_lock);
        return val;
}
278
/* Write a device register indirectly through the PCI config window.
 * Serialized with bnx2_reg_rd_ind() via indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
287
/* Write a 32-bit word into the firmware shared memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
293
294 static u32
295 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
296 {
297         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
298 }
299
/* Write one 32-bit word of a context entry.
 *
 * @cid_addr: base address of the context ID
 * @offset:   word offset within the context
 *
 * The 5709 uses a write-request/completion-poll interface; the poll is
 * bounded at 5 iterations of 5us each.  Older chips take a direct
 * address/data register pair.  Both paths share the indirect window
 * registers, so indirect_lock serializes access.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        spin_lock_bh(&bp->indirect_lock);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                int i;

                REG_WR(bp, BNX2_CTX_CTX_DATA, val);
                REG_WR(bp, BNX2_CTX_CTX_CTRL,
                       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
                /* wait for the hardware to clear the write-request bit */
                for (i = 0; i < 5; i++) {
                        val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
                        if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                                break;
                        udelay(5);
                }
        } else {
                REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
                REG_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_bh(&bp->indirect_lock);
}
323
324 #ifdef BCM_CNIC
325 static int
326 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
327 {
328         struct bnx2 *bp = netdev_priv(dev);
329         struct drv_ctl_io *io = &info->data.io;
330
331         switch (info->cmd) {
332         case DRV_CTL_IO_WR_CMD:
333                 bnx2_reg_wr_ind(bp, io->offset, io->data);
334                 break;
335         case DRV_CTL_IO_RD_CMD:
336                 io->data = bnx2_reg_rd_ind(bp, io->offset);
337                 break;
338         case DRV_CTL_CTX_WR_CMD:
339                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
340                 break;
341         default:
342                 return -EINVAL;
343         }
344         return 0;
345 }
346
347 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
348 {
349         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
350         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
351         int sb_id;
352
353         if (bp->flags & BNX2_FLAG_USING_MSIX) {
354                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
355                 bnapi->cnic_present = 0;
356                 sb_id = bp->irq_nvecs;
357                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
358         } else {
359                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
360                 bnapi->cnic_tag = bnapi->last_status_idx;
361                 bnapi->cnic_present = 1;
362                 sb_id = 0;
363                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
364         }
365
366         cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
367         cp->irq_arr[0].status_blk = (void *)
368                 ((unsigned long) bnapi->status_blk.msi +
369                 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
370         cp->irq_arr[0].status_blk_num = sb_id;
371         cp->num_irq = 1;
372 }
373
/* Register a cnic driver with this device.
 *
 * Returns -EINVAL if @ops is NULL, -EBUSY if a cnic driver is already
 * registered.  Note the ordering: cnic_data must be set before
 * rcu_assign_pointer() publishes cnic_ops, so that RCU readers that see
 * the ops pointer also see valid data.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                              void *data)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (cp->drv_state & CNIC_DRV_STATE_REGD)
                return -EBUSY;

        bp->cnic_data = data;
        rcu_assign_pointer(bp->cnic_ops, ops);

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2_setup_cnic_irq_info(bp);

        return 0;
}
396
/* Unregister the cnic driver.
 *
 * Clears the ops pointer under cnic_lock, then synchronize_rcu() waits
 * for any in-flight RCU readers of cnic_ops to finish before returning,
 * after which the caller may safely tear down cnic state.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
}
411
412 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
413 {
414         struct bnx2 *bp = netdev_priv(dev);
415         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
416
417         cp->drv_owner = THIS_MODULE;
418         cp->chip_id = bp->chip_id;
419         cp->pdev = bp->pdev;
420         cp->io_base = bp->regview;
421         cp->drv_ctl = bnx2_drv_ctl;
422         cp->drv_register_cnic = bnx2_register_cnic;
423         cp->drv_unregister_cnic = bnx2_unregister_cnic;
424
425         return cp;
426 }
427 EXPORT_SYMBOL(bnx2_cnic_probe);
428
429 static void
430 bnx2_cnic_stop(struct bnx2 *bp)
431 {
432         struct cnic_ops *c_ops;
433         struct cnic_ctl_info info;
434
435         mutex_lock(&bp->cnic_lock);
436         c_ops = bp->cnic_ops;
437         if (c_ops) {
438                 info.cmd = CNIC_CTL_STOP_CMD;
439                 c_ops->cnic_ctl(bp->cnic_data, &info);
440         }
441         mutex_unlock(&bp->cnic_lock);
442 }
443
444 static void
445 bnx2_cnic_start(struct bnx2 *bp)
446 {
447         struct cnic_ops *c_ops;
448         struct cnic_ctl_info info;
449
450         mutex_lock(&bp->cnic_lock);
451         c_ops = bp->cnic_ops;
452         if (c_ops) {
453                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
454                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
455
456                         bnapi->cnic_tag = bnapi->last_status_idx;
457                 }
458                 info.cmd = CNIC_CTL_START_CMD;
459                 c_ops->cnic_ctl(bp->cnic_data, &info);
460         }
461         mutex_unlock(&bp->cnic_lock);
462 }
463
464 #else
465
/* CONFIG_CNIC not enabled: cnic stop is a no-op. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
470
/* CONFIG_CNIC not enabled: cnic start is a no-op. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
475
476 #endif
477
/* Read a PHY register over MDIO.
 *
 * @reg: PHY register number
 * @val: out parameter; set to the register value, or 0 on timeout
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction did not complete
 * within the 50 x 10us poll window.  If the chip is auto-polling the
 * PHY, auto-poll is temporarily disabled around the manual access and
 * re-enabled afterwards (the 40us delays let the mode change settle).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* launch the read: PHY address, register, start/busy bit */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* re-read to pick up the data field */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
534
/* Write a PHY register over MDIO.
 *
 * Returns 0 on success, -EBUSY if the transaction did not complete
 * within the 50 x 10us poll window.  Mirrors bnx2_read_phy(): auto-poll
 * is suspended around the manual MDIO access when enabled.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* launch the write: PHY address, register, data, start/busy bit */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
583
584 static void
585 bnx2_disable_int(struct bnx2 *bp)
586 {
587         int i;
588         struct bnx2_napi *bnapi;
589
590         for (i = 0; i < bp->irq_nvecs; i++) {
591                 bnapi = &bp->bnx2_napi[i];
592                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
593                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
594         }
595         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
596 }
597
/* Unmask interrupts on every active vector.
 *
 * Each vector gets two INT_ACK_CMD writes: the first acks up to
 * last_status_idx while still masked, the second unmasks — presumably
 * to avoid losing an event that fired in between (hardware-mandated
 * sequence; TODO confirm against the chip manual).  Finally COAL_NOW
 * forces an immediate coalescing pass so pending events are delivered.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
618
/* Disable interrupts and wait for any running handlers to finish.
 *
 * intr_sem is incremented first so the interrupt path can detect the
 * disable even before the hardware mask takes effect; synchronize_irq()
 * then waits out handlers already executing on other CPUs.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        int i;

        atomic_inc(&bp->intr_sem);
        if (!netif_running(bp->dev))
                return;

        bnx2_disable_int(bp);
        for (i = 0; i < bp->irq_nvecs; i++)
                synchronize_irq(bp->irq_tbl[i].vector);
}
632
633 static void
634 bnx2_napi_disable(struct bnx2 *bp)
635 {
636         int i;
637
638         for (i = 0; i < bp->irq_nvecs; i++)
639                 napi_disable(&bp->bnx2_napi[i].napi);
640 }
641
642 static void
643 bnx2_napi_enable(struct bnx2 *bp)
644 {
645         int i;
646
647         for (i = 0; i < bp->irq_nvecs; i++)
648                 napi_enable(&bp->bnx2_napi[i].napi);
649 }
650
/* Quiesce the interface: stop cnic, NAPI and tx, then disable and
 * synchronize interrupts.  Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                int i;

                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                /* prevent tx timeout: refresh each queue's trans_start so
                 * the watchdog does not fire while the queues are stopped
                 */
                for (i = 0; i <  bp->dev->num_tx_queues; i++) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(bp->dev, i);
                        txq->trans_start = jiffies;
                }
        }
        bnx2_disable_int_sync(bp);
}
670
/* Undo bnx2_netif_stop().  intr_sem balances the atomic_inc() done in
 * bnx2_disable_int_sync(); the interface is only restarted when this
 * call releases the last outstanding disable.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                        bnx2_cnic_start(bp);
                }
        }
}
683
684 static void
685 bnx2_free_tx_mem(struct bnx2 *bp)
686 {
687         int i;
688
689         for (i = 0; i < bp->num_tx_rings; i++) {
690                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
691                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
692
693                 if (txr->tx_desc_ring) {
694                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
695                                             txr->tx_desc_ring,
696                                             txr->tx_desc_mapping);
697                         txr->tx_desc_ring = NULL;
698                 }
699                 kfree(txr->tx_buf_ring);
700                 txr->tx_buf_ring = NULL;
701         }
702 }
703
704 static void
705 bnx2_free_rx_mem(struct bnx2 *bp)
706 {
707         int i;
708
709         for (i = 0; i < bp->num_rx_rings; i++) {
710                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
711                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
712                 int j;
713
714                 for (j = 0; j < bp->rx_max_ring; j++) {
715                         if (rxr->rx_desc_ring[j])
716                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
717                                                     rxr->rx_desc_ring[j],
718                                                     rxr->rx_desc_mapping[j]);
719                         rxr->rx_desc_ring[j] = NULL;
720                 }
721                 vfree(rxr->rx_buf_ring);
722                 rxr->rx_buf_ring = NULL;
723
724                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
725                         if (rxr->rx_pg_desc_ring[j])
726                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
727                                                     rxr->rx_pg_desc_ring[j],
728                                                     rxr->rx_pg_desc_mapping[j]);
729                         rxr->rx_pg_desc_ring[j] = NULL;
730                 }
731                 vfree(rxr->rx_pg_ring);
732                 rxr->rx_pg_ring = NULL;
733         }
734 }
735
736 static int
737 bnx2_alloc_tx_mem(struct bnx2 *bp)
738 {
739         int i;
740
741         for (i = 0; i < bp->num_tx_rings; i++) {
742                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
743                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
744
745                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
746                 if (txr->tx_buf_ring == NULL)
747                         return -ENOMEM;
748
749                 txr->tx_desc_ring =
750                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
751                                              &txr->tx_desc_mapping);
752                 if (txr->tx_desc_ring == NULL)
753                         return -ENOMEM;
754         }
755         return 0;
756 }
757
/* Allocate rx shadow buffer arrays (vmalloc) and DMA descriptor pages.
 * The page ring is only allocated when rx_pg_ring_size is non-zero
 * (jumbo frames).  Returns 0 or -ENOMEM; partial allocations are freed
 * by bnx2_free_mem() in the caller's error path.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_rx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
                int j;

                rxr->rx_buf_ring =
                        vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
                if (rxr->rx_buf_ring == NULL)
                        return -ENOMEM;

                memset(rxr->rx_buf_ring, 0,
                       SW_RXBD_RING_SIZE * bp->rx_max_ring);

                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                     &rxr->rx_desc_mapping[j]);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;

                }

                if (bp->rx_pg_ring_size) {
                        rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                                  bp->rx_max_pg_ring);
                        if (rxr->rx_pg_ring == NULL)
                                return -ENOMEM;

                        memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                               bp->rx_max_pg_ring);
                }

                /* rx_max_pg_ring is 0 when the page ring is unused, so
                 * this loop is a no-op in that case
                 */
                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
                                pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                                &rxr->rx_pg_desc_mapping[j]);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;

                }
        }
        return 0;
}
806
/* Free everything bnx2_alloc_mem() allocated: tx/rx rings, the 5709
 * context pages, and the combined status + statistics block.  Safe to
 * call on partially-allocated state, so it doubles as the error-unwind
 * path for bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        bnx2_free_tx_mem(bp);
        bnx2_free_rx_mem(bp);

        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* status and statistics blocks share one DMA allocation */
        if (bnapi->status_blk.msi) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bnapi->status_blk.msi,
                                    bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
}
832
/* Allocate all device memory: the combined status + statistics DMA
 * block, the 5709 context pages, and the rx/tx rings.  Returns 0 or
 * -ENOMEM; any partial allocation is unwound via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size, err;
        struct bnx2_napi *bnapi;
        void *status_blk;

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* room for one aligned status block per hardware vector */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
        if (status_blk == NULL)
                goto alloc_mem_err;

        memset(status_blk, 0, bp->status_stats_size);

        /* vector 0 uses the base status block */
        bnapi = &bp->bnx2_napi[0];
        bnapi->status_blk.msi = status_blk;
        bnapi->hw_tx_cons_ptr =
                &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
        bnapi->hw_rx_cons_ptr =
                &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                /* vectors 1..N-1 each get an aligned slice of the block */
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct status_block_msix *sblk;

                        bnapi = &bp->bnx2_napi[i];

                        sblk = (void *) (status_blk +
                                         BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->status_blk.msix = sblk;
                        bnapi->hw_tx_cons_ptr =
                                &sblk->status_tx_quick_consumer_index;
                        bnapi->hw_rx_cons_ptr =
                                &sblk->status_rx_quick_consumer_index;
                        /* vector number encoded in bits 31:24 of INT_ACK_CMD */
                        bnapi->int_num = i << 24;
                }
        }

        /* statistics block sits directly after the status block(s) */
        bp->stats_blk = status_blk + status_blk_size;

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 needs 8KB of host context memory, in page-sized chunks */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }

        err = bnx2_alloc_rx_mem(bp);
        if (err)
                goto alloc_mem_err;

        err = bnx2_alloc_tx_mem(bp);
        if (err)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
909
910 static void
911 bnx2_report_fw_link(struct bnx2 *bp)
912 {
913         u32 fw_link_status = 0;
914
915         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
916                 return;
917
918         if (bp->link_up) {
919                 u32 bmsr;
920
921                 switch (bp->line_speed) {
922                 case SPEED_10:
923                         if (bp->duplex == DUPLEX_HALF)
924                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
925                         else
926                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
927                         break;
928                 case SPEED_100:
929                         if (bp->duplex == DUPLEX_HALF)
930                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
931                         else
932                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
933                         break;
934                 case SPEED_1000:
935                         if (bp->duplex == DUPLEX_HALF)
936                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
937                         else
938                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
939                         break;
940                 case SPEED_2500:
941                         if (bp->duplex == DUPLEX_HALF)
942                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
943                         else
944                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
945                         break;
946                 }
947
948                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
949
950                 if (bp->autoneg) {
951                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
952
953                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
954                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955
956                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
957                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
958                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
959                         else
960                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
961                 }
962         }
963         else
964                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
965
966         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
967 }
968
969 static char *
970 bnx2_xceiver_str(struct bnx2 *bp)
971 {
972         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
973                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
974                  "Copper"));
975 }
976
977 static void
978 bnx2_report_link(struct bnx2 *bp)
979 {
980         if (bp->link_up) {
981                 netif_carrier_on(bp->dev);
982                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
983                             bnx2_xceiver_str(bp),
984                             bp->line_speed,
985                             bp->duplex == DUPLEX_FULL ? "full" : "half");
986
987                 if (bp->flow_ctrl) {
988                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
989                                 pr_cont(", receive ");
990                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
991                                         pr_cont("& transmit ");
992                         }
993                         else {
994                                 pr_cont(", transmit ");
995                         }
996                         pr_cont("flow control ON");
997                 }
998                 pr_cont("\n");
999         } else {
1000                 netif_carrier_off(bp->dev);
1001                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1002                            bnx2_xceiver_str(bp));
1003         }
1004
1005         bnx2_report_fw_link(bp);
1006 }
1007
/* Resolve the effective flow control mode into bp->flow_ctrl, either
 * from the forced configuration or from the autoneg advertisement
 * exchange, per the pause resolution rules of IEEE 802.3 Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If speed or flow-control autoneg is disabled, honor the
	 * administratively requested setting (full duplex only; pause
	 * is undefined at half duplex).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes hardware reports the already-resolved pause
	 * result directly in the 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000BASE-X pause bits into the copper encoding so
	 * a single resolution table below covers both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Asymmetric: partner sends, we receive. */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			/* Asymmetric: we send pause, partner receives. */
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1083
1084 static int
1085 bnx2_5709s_linkup(struct bnx2 *bp)
1086 {
1087         u32 val, speed;
1088
1089         bp->link_up = 1;
1090
1091         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1092         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1093         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1094
1095         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1096                 bp->line_speed = bp->req_line_speed;
1097                 bp->duplex = bp->req_duplex;
1098                 return 0;
1099         }
1100         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1101         switch (speed) {
1102                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1103                         bp->line_speed = SPEED_10;
1104                         break;
1105                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1106                         bp->line_speed = SPEED_100;
1107                         break;
1108                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1109                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1110                         bp->line_speed = SPEED_1000;
1111                         break;
1112                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1113                         bp->line_speed = SPEED_2500;
1114                         break;
1115         }
1116         if (val & MII_BNX2_GP_TOP_AN_FD)
1117                 bp->duplex = DUPLEX_FULL;
1118         else
1119                 bp->duplex = DUPLEX_HALF;
1120         return 0;
1121 }
1122
1123 static int
1124 bnx2_5708s_linkup(struct bnx2 *bp)
1125 {
1126         u32 val;
1127
1128         bp->link_up = 1;
1129         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1130         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1131                 case BCM5708S_1000X_STAT1_SPEED_10:
1132                         bp->line_speed = SPEED_10;
1133                         break;
1134                 case BCM5708S_1000X_STAT1_SPEED_100:
1135                         bp->line_speed = SPEED_100;
1136                         break;
1137                 case BCM5708S_1000X_STAT1_SPEED_1G:
1138                         bp->line_speed = SPEED_1000;
1139                         break;
1140                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1141                         bp->line_speed = SPEED_2500;
1142                         break;
1143         }
1144         if (val & BCM5708S_1000X_STAT1_FD)
1145                 bp->duplex = DUPLEX_FULL;
1146         else
1147                 bp->duplex = DUPLEX_HALF;
1148
1149         return 0;
1150 }
1151
1152 static int
1153 bnx2_5706s_linkup(struct bnx2 *bp)
1154 {
1155         u32 bmcr, local_adv, remote_adv, common;
1156
1157         bp->link_up = 1;
1158         bp->line_speed = SPEED_1000;
1159
1160         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1161         if (bmcr & BMCR_FULLDPLX) {
1162                 bp->duplex = DUPLEX_FULL;
1163         }
1164         else {
1165                 bp->duplex = DUPLEX_HALF;
1166         }
1167
1168         if (!(bmcr & BMCR_ANENABLE)) {
1169                 return 0;
1170         }
1171
1172         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1173         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1174
1175         common = local_adv & remote_adv;
1176         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1177
1178                 if (common & ADVERTISE_1000XFULL) {
1179                         bp->duplex = DUPLEX_FULL;
1180                 }
1181                 else {
1182                         bp->duplex = DUPLEX_HALF;
1183                 }
1184         }
1185
1186         return 0;
1187 }
1188
1189 static int
1190 bnx2_copper_linkup(struct bnx2 *bp)
1191 {
1192         u32 bmcr;
1193
1194         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1195         if (bmcr & BMCR_ANENABLE) {
1196                 u32 local_adv, remote_adv, common;
1197
1198                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1199                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1200
1201                 common = local_adv & (remote_adv >> 2);
1202                 if (common & ADVERTISE_1000FULL) {
1203                         bp->line_speed = SPEED_1000;
1204                         bp->duplex = DUPLEX_FULL;
1205                 }
1206                 else if (common & ADVERTISE_1000HALF) {
1207                         bp->line_speed = SPEED_1000;
1208                         bp->duplex = DUPLEX_HALF;
1209                 }
1210                 else {
1211                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1212                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1213
1214                         common = local_adv & remote_adv;
1215                         if (common & ADVERTISE_100FULL) {
1216                                 bp->line_speed = SPEED_100;
1217                                 bp->duplex = DUPLEX_FULL;
1218                         }
1219                         else if (common & ADVERTISE_100HALF) {
1220                                 bp->line_speed = SPEED_100;
1221                                 bp->duplex = DUPLEX_HALF;
1222                         }
1223                         else if (common & ADVERTISE_10FULL) {
1224                                 bp->line_speed = SPEED_10;
1225                                 bp->duplex = DUPLEX_FULL;
1226                         }
1227                         else if (common & ADVERTISE_10HALF) {
1228                                 bp->line_speed = SPEED_10;
1229                                 bp->duplex = DUPLEX_HALF;
1230                         }
1231                         else {
1232                                 bp->line_speed = 0;
1233                                 bp->link_up = 0;
1234                         }
1235                 }
1236         }
1237         else {
1238                 if (bmcr & BMCR_SPEED100) {
1239                         bp->line_speed = SPEED_100;
1240                 }
1241                 else {
1242                         bp->line_speed = SPEED_10;
1243                 }
1244                 if (bmcr & BMCR_FULLDPLX) {
1245                         bp->duplex = DUPLEX_FULL;
1246                 }
1247                 else {
1248                         bp->duplex = DUPLEX_HALF;
1249                 }
1250         }
1251
1252         return 0;
1253 }
1254
/* Program the L2 RX context for the given connection ID, including the
 * 5709 pause watermarks derived from the RX ring size and TX flow
 * control state.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Pause is only asserted when TX flow control is on;
		 * otherwise disable the low watermark entirely.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale both watermarks into hardware units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; a zero high watermark also
		 * forces the low watermark off.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1290
1291 static void
1292 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1293 {
1294         int i;
1295         u32 cid;
1296
1297         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1298                 if (i == 1)
1299                         cid = RX_RSS_CID;
1300                 bnx2_init_rx_context(bp, cid);
1301         }
1302 }
1303
/* Program the EMAC to match the currently resolved link parameters:
 * port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.  Also
 * refreshes the 5709 RX contexts since their watermarks depend on the
 * flow control state.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* Gigabit half duplex needs extended slot time / IPG values. */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no 10M port mode; use MII. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 RX pause watermarks depend on flow_ctrl; reprogram. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1371
1372 static void
1373 bnx2_enable_bmsr1(struct bnx2 *bp)
1374 {
1375         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1376             (CHIP_NUM(bp) == CHIP_NUM_5709))
1377                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1378                                MII_BNX2_BLK_ADDR_GP_STATUS);
1379 }
1380
1381 static void
1382 bnx2_disable_bmsr1(struct bnx2 *bp)
1383 {
1384         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1385             (CHIP_NUM(bp) == CHIP_NUM_5709))
1386                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1387                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1388 }
1389
1390 static int
1391 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1392 {
1393         u32 up1;
1394         int ret = 1;
1395
1396         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1397                 return 0;
1398
1399         if (bp->autoneg & AUTONEG_SPEED)
1400                 bp->advertising |= ADVERTISED_2500baseX_Full;
1401
1402         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1403                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1404
1405         bnx2_read_phy(bp, bp->mii_up1, &up1);
1406         if (!(up1 & BCM5708S_UP1_2G5)) {
1407                 up1 |= BCM5708S_UP1_2G5;
1408                 bnx2_write_phy(bp, bp->mii_up1, up1);
1409                 ret = 0;
1410         }
1411
1412         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1413                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1414                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1415
1416         return ret;
1417 }
1418
1419 static int
1420 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1421 {
1422         u32 up1;
1423         int ret = 0;
1424
1425         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1426                 return 0;
1427
1428         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1429                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1430
1431         bnx2_read_phy(bp, bp->mii_up1, &up1);
1432         if (up1 & BCM5708S_UP1_2G5) {
1433                 up1 &= ~BCM5708S_UP1_2G5;
1434                 bnx2_write_phy(bp, bp->mii_up1, up1);
1435                 ret = 1;
1436         }
1437
1438         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1439                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1440                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1441
1442         return ret;
1443 }
1444
/* Force the SerDes PHY to 2.5G operation using the chip-specific
 * mechanism (SERDES_DIG MISC1 on 5709, BMCR force bit on 5708), then
 * disable autoneg in BMCR when speed autoneg was configured.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Set the force-speed field in the SERDES_DIG block,
		 * then switch back to COMBO_IEEEB0 to access BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no forced-2.5G mechanism. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Forcing speed overrides autoneg. */
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1481
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bit and, when speed autoneg is configured, restart autoneg at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG block, then
		 * switch back to COMBO_IEEEB0 to access BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no forced-2.5G mechanism. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1514
1515 static void
1516 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517 {
1518         u32 val;
1519
1520         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522         if (start)
1523                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524         else
1525                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526 }
1527
/* Re-evaluate the PHY link state and update the driver and MAC
 * accordingly.  Called with bp->phy_lock held.  Reports any link
 * transition and always reprograms the EMAC via bnx2_set_mac_link().
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote PHY: link state is managed by firmware, not here. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR is latched; read twice to get the current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link state from the EMAC
	 * status and the AN debug shadow register instead of trusting
	 * BMSR alone.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN debug register is latched as well; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G state so autoneg can
		 * run, and re-enable autoneg if parallel detect fired.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log on an actual transition. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1611
1612 static int
1613 bnx2_reset_phy(struct bnx2 *bp)
1614 {
1615         int i;
1616         u32 reg;
1617
1618         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619
1620 #define PHY_RESET_MAX_WAIT 100
1621         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622                 udelay(10);
1623
1624                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625                 if (!(reg & BMCR_RESET)) {
1626                         udelay(20);
1627                         break;
1628                 }
1629         }
1630         if (i == PHY_RESET_MAX_WAIT) {
1631                 return -EBUSY;
1632         }
1633         return 0;
1634 }
1635
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639         u32 adv = 0;
1640
1641         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643
1644                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645                         adv = ADVERTISE_1000XPAUSE;
1646                 }
1647                 else {
1648                         adv = ADVERTISE_PAUSE_CAP;
1649                 }
1650         }
1651         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653                         adv = ADVERTISE_1000XPSE_ASYM;
1654                 }
1655                 else {
1656                         adv = ADVERTISE_PAUSE_ASYM;
1657                 }
1658         }
1659         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662                 }
1663                 else {
1664                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665                 }
1666         }
1667         return adv;
1668 }
1669
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671
/* Configure link settings on a firmware-managed (remote) PHY by
 * encoding the requested speed/duplex/pause into a netlink-style
 * argument word and handing it to the bootcode via the mailbox.
 * Temporarily drops bp->phy_lock around the firmware handshake.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: encode every advertised speed/duplex mode. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested setting. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware handshake can sleep/poll; drop phy_lock while
	 * waiting, then reacquire before returning to the caller.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1730
/* Configure the SerDes (fibre) PHY according to bp->autoneg /
 * bp->req_line_speed / bp->req_duplex.
 *
 * Delegates to bnx2_setup_remote_phy() when the firmware owns the
 * PHY.  Otherwise handles two cases: forced speed (autoneg disabled)
 * and autonegotiation.  May drop phy_lock around msleep() when forcing
 * a link transition, hence the __releases/__acquires annotations.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G capability on/off requires a link
		 * bounce so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000: presumably the 5709's forced
				 * 2.5G speed-select bit in BMCR —
				 * confirm against the 5709 PHY docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing to reprogram; just re-resolve flow
			 * control and refresh the MAC link settings.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() — must drop the spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1847
/* Advertisement mask of every fibre speed this board supports.
 * NOTE: expands using the local variable 'bp' at the expansion site.
 *
 * The expansion is fully parenthesized so the conditional operator
 * cannot absorb neighbouring operators.  Previously, in
 *	ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg
 * the '|' bound into the ternary's third operand only, so 2.5G-capable
 * boards silently lost ADVERTISED_Autoneg from bp->advertising.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

/* Advertisement mask of every copper speed the chip supports. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for all 10/100 modes (plus CSMA,
 * which IEEE 802.3 requires to be set in the ANAR).
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for all gigabit modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866         u32 link;
1867
1868         if (bp->phy_port == PORT_TP)
1869                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870         else
1871                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872
1873         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874                 bp->req_line_speed = 0;
1875                 bp->autoneg |= AUTONEG_SPEED;
1876                 bp->advertising = ADVERTISED_Autoneg;
1877                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878                         bp->advertising |= ADVERTISED_10baseT_Half;
1879                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880                         bp->advertising |= ADVERTISED_10baseT_Full;
1881                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882                         bp->advertising |= ADVERTISED_100baseT_Half;
1883                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884                         bp->advertising |= ADVERTISED_100baseT_Full;
1885                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886                         bp->advertising |= ADVERTISED_1000baseT_Full;
1887                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888                         bp->advertising |= ADVERTISED_2500baseX_Full;
1889         } else {
1890                 bp->autoneg = 0;
1891                 bp->advertising = 0;
1892                 bp->req_duplex = DUPLEX_FULL;
1893                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894                         bp->req_line_speed = SPEED_10;
1895                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896                                 bp->req_duplex = DUPLEX_HALF;
1897                 }
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899                         bp->req_line_speed = SPEED_100;
1900                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901                                 bp->req_duplex = DUPLEX_HALF;
1902                 }
1903                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904                         bp->req_line_speed = SPEED_1000;
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906                         bp->req_line_speed = SPEED_2500;
1907         }
1908 }
1909
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914                 bnx2_set_default_remote_link(bp);
1915                 return;
1916         }
1917
1918         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919         bp->req_line_speed = 0;
1920         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921                 u32 reg;
1922
1923                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924
1925                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928                         bp->autoneg = 0;
1929                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1930                         bp->req_duplex = DUPLEX_FULL;
1931                 }
1932         } else
1933                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935
/* Tell the firmware the driver is still alive: bump the driver pulse
 * sequence number and write it to the DRV_PULSE shared-memory mailbox.
 * Writes go through the PCICFG register window (address register, then
 * data register); indirect_lock serializes that two-register sequence
 * against other indirect accesses.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1949
/* Handle a link-status event reported by the firmware-managed PHY.
 *
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * line_speed, duplex, flow_ctrl and phy_port; reports a link change if
 * the up/down state flipped, and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* The status word doubles as a heartbeat request channel. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case overrides the duplex default and then
		 * deliberately falls through to the FULL case, which
		 * sets the shared line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: forced (from req_flow_ctrl) unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case it comes from the negotiated status bits.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port type change (fibre <-> copper) resets the link
		 * configuration to the new port's defaults.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2026
2027 static int
2028 bnx2_set_remote_link(struct bnx2 *bp)
2029 {
2030         u32 evt_code;
2031
2032         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2033         switch (evt_code) {
2034                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2035                         bnx2_remote_phy_event(bp);
2036                         break;
2037                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038                 default:
2039                         bnx2_send_heart_beat(bp);
2040                         break;
2041         }
2042         return 0;
2043 }
2044
/* Configure the copper PHY according to bp->autoneg /
 * bp->req_line_speed / bp->req_duplex.
 *
 * With autoneg enabled, rewrites the advertisement registers and
 * restarts autoneg only if something actually changed.  With forced
 * speed, programs BMCR directly, bouncing the link first if it is up.
 * Drops phy_lock around msleep() (hence the __releases/__acquires
 * annotations).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Preserve only the speed and pause bits of the current
		 * advertisement for the change comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the desired advertisement from ethtool flags. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only touch the PHY if the advertisement changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path: build the new BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state (standard MII
		 * behavior).
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2143
2144 static int
2145 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2146 __releases(&bp->phy_lock)
2147 __acquires(&bp->phy_lock)
2148 {
2149         if (bp->loopback == MAC_LOOPBACK)
2150                 return 0;
2151
2152         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2153                 return (bnx2_setup_serdes_phy(bp, port));
2154         }
2155         else {
2156                 return (bnx2_setup_copper_phy(bp));
2157         }
2158 }
2159
/* One-time initialization of the 5709 SerDes PHY.
 *
 * The 5709 SerDes is accessed via block-addressed register pages
 * (selected through MII_BNX2_BLK_ADDR); the standard IEEE MII
 * registers live at an offset of 0x10, so the cached register
 * numbers are pointed there first.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers are at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fibre mode, disable media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or mask) the 2.5G capability. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address on the combo IEEE page for normal
	 * MII register access.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2209
/* One-time initialization of the 5708 SerDes PHY: fibre mode with
 * auto-detect, PLL early-lock detect, optional 2.5G advertisement,
 * plus board-specific TX amplitude tuning.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G if this board supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a TX control override from NVRAM on backplane boards. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2267
/* One-time initialization of the 5706 SerDes PHY.
 *
 * The 0x18/0x1c accesses below target vendor shadow/expansion
 * registers; the values come from Broadcom and their bit meanings are
 * not documented here — confirm against Broadcom PHY documentation
 * before changing them.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Jumbo frames off: clear the extended-length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2305
/* One-time initialization of the copper PHY: optional CRC workaround,
 * optional early-DAC disable, MTU-dependent extended packet length,
 * and ethernet@wirespeed enable.
 *
 * The raw 0x15/0x17/0x18/0x10 register accesses are vendor
 * shadow/expansion register sequences supplied by Broadcom — confirm
 * bit meanings against Broadcom PHY documentation before changing.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor workaround sequence for boards flagged with the CRC
	 * fix requirement.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expand register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Jumbo frames off: clear the extended-length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2357
2358
2359 static int
2360 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2361 __releases(&bp->phy_lock)
2362 __acquires(&bp->phy_lock)
2363 {
2364         u32 val;
2365         int rc = 0;
2366
2367         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2368         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2369
2370         bp->mii_bmcr = MII_BMCR;
2371         bp->mii_bmsr = MII_BMSR;
2372         bp->mii_bmsr1 = MII_BMSR;
2373         bp->mii_adv = MII_ADVERTISE;
2374         bp->mii_lpa = MII_LPA;
2375
2376         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2377
2378         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2379                 goto setup_phy;
2380
2381         bnx2_read_phy(bp, MII_PHYSID1, &val);
2382         bp->phy_id = val << 16;
2383         bnx2_read_phy(bp, MII_PHYSID2, &val);
2384         bp->phy_id |= val & 0xffff;
2385
2386         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2387                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2388                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2389                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2390                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2391                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2392                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2393         }
2394         else {
2395                 rc = bnx2_init_copper_phy(bp, reset_phy);
2396         }
2397
2398 setup_phy:
2399         if (!rc)
2400                 rc = bnx2_setup_phy(bp, bp->phy_port);
2401
2402         return rc;
2403 }
2404
2405 static int
2406 bnx2_set_mac_loopback(struct bnx2 *bp)
2407 {
2408         u32 mac_mode;
2409
2410         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2411         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2412         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2413         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2414         bp->link_up = 1;
2415         return 0;
2416 }
2417
2418 static int bnx2_test_link(struct bnx2 *);
2419
/* Put the PHY in loopback mode for self-test.
 *
 * Forces BMCR loopback at 1000/full (under phy_lock, which guards PHY
 * register access), then polls bnx2_test_link() for up to ~1s while
 * the loopback "link" settles, and finally reprograms the EMAC for
 * GMII with all MAC-level loopback/force bits cleared.  Returns 0 or
 * the error from the BMCR write.  Sleeps; process context only.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait up to 10 x 100ms for the loopback link to come up.
	 * A timeout is not treated as an error here.
	 */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Loop at the PHY only: clear the MAC-level loopback and force
	 * bits and select the GMII port.
	 */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2449
/* Post a command to the firmware through the DRV_MB shared-memory
 * mailbox and optionally wait for its acknowledgement.
 *
 * @msg_data: BNX2_DRV_MSG_* command; the incremented driver sequence
 *            number is OR'ed in so firmware can distinguish requests.
 * @ack:      if zero, fire-and-forget (returns 0 immediately after
 *            the write); otherwise poll FW_MB for a matching-sequence
 *            ACK for up to BNX2_FW_ACK_TIME_OUT_MS.
 * @silent:   suppress the error message on ACK timeout.
 *
 * Returns 0 on success (and unconditionally for WAIT0-class
 * messages), -EBUSY on ACK timeout (after notifying firmware of the
 * timeout), or -EIO if firmware ACKed with a non-OK status.
 * Sleeps; must not be called with a spinlock held.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't require a successful ACK. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2494
/* Initialize the 5709 context memory: kick off the on-chip memory init,
 * then program the host page table with the bus address of every
 * pre-allocated context block in bp->ctx_blk[].
 *
 * Returns 0 on success, -EBUSY if either the memory init or a page
 * table write request fails to complete within the poll budget, or
 * -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and start memory init; the page size
	 * is encoded as (log2(page size) - 8) in bits 16+.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll until the chip clears MEM_INIT (up to 10 x 2us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Contexts must start zeroed before the chip uses them. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the 64-bit bus address of page i, then issue the
		 * write request and wait for the chip to latch it.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2542
/* Zero out the 96 on-chip connection contexts through the
 * CTX_VIRT_ADDR / CTX_PAGE_TBL register window (used on chips that
 * keep context in internal memory rather than host pages).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* 5706 A0 quirk: vcids with bit 3 set map to a
			 * different physical cid.
			 */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans CTX_SIZE/PHY_CTX_SIZE physical pages;
		 * note the addresses accumulate across iterations.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2585
/* Quarantine bad rx buffer memory blocks: drain the firmware rx mbuf
 * pool by allocating every free mbuf, remember the good ones, then free
 * only the good ones back.  The bad blocks (bit 9 set in the allocated
 * value) remain allocated and are never handed out again.
 *
 * Returns 0 on success or -ENOMEM if the scratch array can't be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): assumes the pool holds at most 512 mbufs --
	 * confirm against the RBUF pool size.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the mbuf value in the format the free register
		 * expects.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2636
2637 static void
2638 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2639 {
2640         u32 val;
2641
2642         val = (mac_addr[0] << 8) | mac_addr[1];
2643
2644         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2645
2646         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2647                 (mac_addr[4] << 8) | mac_addr[5];
2648
2649         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2650 }
2651
/* Allocate a fresh page for slot @index of the rx page ring, DMA-map it
 * and publish its bus address in the matching rx_bd descriptor.
 *
 * Runs in atomic context (GFP_ATOMIC).  Returns 0 on success, -ENOMEM
 * if no page is available, -EIO if the DMA mapping failed.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* The descriptor takes the 64-bit bus address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2676
2677 static void
2678 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2679 {
2680         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2681         struct page *page = rx_pg->page;
2682
2683         if (!page)
2684                 return;
2685
2686         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2687                        PCI_DMA_FROMDEVICE);
2688
2689         __free_page(page);
2690         rx_pg->page = NULL;
2691 }
2692
/* Allocate and DMA-map a new rx skb for slot @index of the rx buffer
 * ring, publish its bus address in the matching rx_bd, and advance the
 * producer byte sequence.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation failed, -EIO if
 * the DMA mapping failed.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary for the chip. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* The descriptor takes the 64-bit bus address as two 32-bit halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2727
2728 static int
2729 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2730 {
2731         struct status_block *sblk = bnapi->status_blk.msi;
2732         u32 new_link_state, old_link_state;
2733         int is_set = 1;
2734
2735         new_link_state = sblk->status_attn_bits & event;
2736         old_link_state = sblk->status_attn_bits_ack & event;
2737         if (new_link_state != old_link_state) {
2738                 if (new_link_state)
2739                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2740                 else
2741                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2742         } else
2743                 is_set = 0;
2744
2745         return is_set;
2746 }
2747
/* Handle PHY-related attention events under bp->phy_lock: a LINK_STATE
 * change triggers bnx2_set_link(); a TIMER_ABORT event triggers
 * bnx2_set_remote_link().
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2761
/* Read the hardware tx consumer index from the status block.  An index
 * landing exactly on a ring-page boundary (MAX_TX_DESC_CNT) is bumped
 * past it, since that last descriptor slot is skipped (presumably it
 * holds the next-page pointer -- see ring setup).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2775
/* Reclaim completed tx descriptors for this NAPI instance's tx ring:
 * unmap the head and all fragment pages, free the skbs, and wake the
 * associated netdev tx queue if it was stopped and enough descriptors
 * are now available.
 *
 * Returns the number of packets reclaimed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One tx queue per bnx2_napi instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once every BD of the packet has
			 * completed; account for the skipped slot at the
			 * ring-page boundary.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's page as we walk its BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the status block in case more work arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with bnx2_start_xmit()
	 * stopping the queue at the same time.
	 */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2866
/* Recycle @count pages from the rx page ring consumer back to the
 * producer after an allocation failure, reusing their existing DMA
 * mappings.  If @skb is non-NULL its last frag page is first detached
 * and returned to the ring, and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA mapping, and the descriptor
		 * address from the consumer slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2922
/* Recycle the rx buffer at @cons into producer slot @prod, reusing its
 * DMA mapping and descriptor address.  Used when a replacement skb
 * could not be allocated, or after a small packet has been copied out.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area back to the device before reposting. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2952
/* Finish building a received packet in @skb: repost a replacement
 * buffer at the producer, then either keep the whole packet in the
 * linear area (@hdr_len == 0) or attach page-ring frags for the
 * remainder of a split/jumbo packet.
 *
 * @len:      packet length with the trailing 4 bytes (presumably FCS,
 *            see the len -= 4 in bnx2_rx_int) already subtracted.
 * @hdr_len:  bytes kept in the linear area for split packets, 0 for
 *            normal packets.
 * @ring_idx: producer index in the low 16 bits, consumer in the high.
 *
 * On allocation failure the original buffer and any needed pages are
 * recycled and an error is returned; @skb must not be used further.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the pages this packet would have
			 * consumed from the page ring.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear area. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The remaining bytes are entirely within
				 * the trailing 4 bytes we strip; trim the
				 * skb back and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3051
/* Read the hardware rx consumer index from the status block.  As with
 * the tx side, an index landing on a ring-page boundary
 * (MAX_RX_DESC_CNT) is bumped past the skipped descriptor slot.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3065
/* Main NAPI rx handler: walk the rx ring between the software and
 * hardware consumer indices, validate each l2_fhdr, build skbs (copying
 * small packets, or taking over the ring buffer and page frags for
 * large/split ones), apply VLAN and checksum offload results, and hand
 * the packets to the stack.  Finally the producer indices are written
 * back to the chip.
 *
 * Returns the number of packets processed (at most @budget).
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the frame header area is synced here; the rest is
		 * handled when the buffer is consumed or recycled.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with length and status. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop frames with hardware-detected errors; recycle the
		 * buffer (and any page-ring pages) instead of freeing it.
		 */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4 bytes (presumably the FCS). */
		len -= 4;

		/* Small packets are copied into a fresh skb so the ring
		 * buffer can be reused without remapping.
		 */
		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		/* Hardware stripped the VLAN tag: either hand it to the
		 * VLAN acceleration layer or rebuild the 802.1Q header
		 * in-line when no vlan group is registered.
		 */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN header. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results for TCP/UDP frames. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices and byte sequence to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3241
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts while NAPI polling runs. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3264
/* One-shot MSI ISR: like bnx2_msi() but without the explicit mask
 * write (presumably the one-shot hardware mode masks automatically --
 * see the interrupt setup code).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3281
/* INTx (shared line) ISR: determine whether this device raised the
 * interrupt, mask and deassert it, and schedule NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only when we actually schedule, so a
	 * poll already in progress re-checks the new work.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3320
3321 static inline int
3322 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3323 {
3324         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3325         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3326
3327         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3328             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3329                 return 1;
3330         return 0;
3331 }
3332
3333 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3334                                  STATUS_ATTN_BITS_TIMER_ABORT)
3335
3336 static inline int
3337 bnx2_has_work(struct bnx2_napi *bnapi)
3338 {
3339         struct status_block *sblk = bnapi->status_blk.msi;
3340
3341         if (bnx2_has_fast_work(bnapi))
3342                 return 1;
3343
3344 #ifdef BCM_CNIC
3345         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3346                 return 1;
3347 #endif
3348
3349         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3350             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3351                 return 1;
3352
3353         return 0;
3354 }
3355
/* Detect and work around a missed MSI.  If work is pending and the
 * status index has not advanced since the previous check, assume the
 * MSI was lost: pulse the MSI enable bit and invoke the handler by
 * hand.  (NOTE(review): presumably invoked periodically from the
 * driver timer while otherwise idle — confirm against the caller.)
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not currently enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* No progress since last check: toggle MSI enable
			 * off and back on, then process the stalled work
			 * by calling the MSI handler directly.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3377
#ifdef BCM_CNIC
/* Forward status-block events to the registered cnic (offload) driver,
 * if any.  bp->cnic_ops is RCU-protected, so the handler is invoked
 * under rcu_read_lock().
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		/* cnic_tag records how far the cnic driver has consumed
		 * the status block; compared in bnx2_has_work().
		 */
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3394
/* Service pending attention events (link state change / timer abort)
 * from the NAPI poll loop.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when the raw bits disagree with
	 * the acknowledged bits.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}
3414
3415 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3416                           int work_done, int budget)
3417 {
3418         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3419         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3420
3421         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3422                 bnx2_tx_int(bp, bnapi, 0);
3423
3424         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3425                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3426
3427         return work_done;
3428 }
3429
/* NAPI poll routine for MSI-X vectors.  Handles only rx/tx fast-path
 * work; link and cnic events are not polled here (compare bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;	/* budget exhausted; NAPI will poll again */

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: ack up to the processed status index and
			 * re-enable this vector's interrupt.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3456
/* NAPI poll routine for INTx/MSI (and the first vector).  In addition
 * to rx/tx fast-path work it services link attentions and, when
 * compiled in, cnic events.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* With MSI/MSI-X, a single ack write re-enables
			 * the interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* With INTx, ack first with the interrupt still
			 * masked, then ack again to unmask it.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3505
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the receive filtering: promiscuous / all-multicast /
 * multicast hash, VLAN tag stripping, and the unicast match filters,
 * then commits the sort-mode register.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes rx_mode updates here. */
	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only while no vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each multicast address into a 256-bit filter:
		 * low CRC byte selects register (bits 7:5) and bit (4:0).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many secondary unicast addresses for the match filters:
	 * fall back to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC mode register when something changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Commit the sort mode: clear, program, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3598
3599 static int __devinit
3600 check_fw_section(const struct firmware *fw,
3601                  const struct bnx2_fw_file_section *section,
3602                  u32 alignment, bool non_empty)
3603 {
3604         u32 offset = be32_to_cpu(section->offset);
3605         u32 len = be32_to_cpu(section->len);
3606
3607         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3608                 return -EINVAL;
3609         if ((non_empty && len == 0) || len > fw->size - offset ||
3610             len & (alignment - 1))
3611                 return -EINVAL;
3612         return 0;
3613 }
3614
3615 static int __devinit
3616 check_mips_fw_entry(const struct firmware *fw,
3617                     const struct bnx2_mips_fw_file_entry *entry)
3618 {
3619         if (check_fw_section(fw, &entry->text, 4, true) ||
3620             check_fw_section(fw, &entry->data, 4, false) ||
3621             check_fw_section(fw, &entry->rodata, 4, false))
3622                 return -EINVAL;
3623         return 0;
3624 }
3625
/* Load and sanity-check the MIPS and RV2P firmware images appropriate
 * for this chip revision.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on failure this function does not release any firmware
 * already requested (bp->mips_firmware); presumably the probe error
 * path releases it — verify against the caller.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Select images by chip: 5709 (A0/A1 need a special RV2P image)
	 * versus the 5706/5708 family.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		return rc;
	}
	/* Validate headers and every section of both images before any
	 * of it is written to the hardware.
	 */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3677
3678 static u32
3679 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3680 {
3681         switch (idx) {
3682         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3683                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3684                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3685                 break;
3686         }
3687         return rv2p_code;
3688 }
3689
/* Download firmware into one RV2P processor, eight bytes (one
 * instruction: high word + low word) at a time, apply any fixup words,
 * and leave the processor held in reset (un-stalled later).
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own instruction-write command/address. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write each instruction: high word, low word, then commit it
	 * at instruction index i/8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions that need run-time patching (see
	 * rv2p_fw_fixup()); fixup[i] holds the word index to patch, 0
	 * meaning unused.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3749
3750 static int
3751 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3752             const struct bnx2_mips_fw_file_entry *fw_entry)
3753 {
3754         u32 addr, len, file_offset;
3755         __be32 *data;
3756         u32 offset;
3757         u32 val;
3758
3759         /* Halt the CPU. */
3760         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3761         val |= cpu_reg->mode_value_halt;
3762         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3763         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3764
3765         /* Load the Text area. */
3766         addr = be32_to_cpu(fw_entry->text.addr);
3767         len = be32_to_cpu(fw_entry->text.len);
3768         file_offset = be32_to_cpu(fw_entry->text.offset);
3769         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3770
3771         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3772         if (len) {
3773                 int j;
3774
3775                 for (j = 0; j < (len / 4); j++, offset += 4)
3776                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3777         }
3778
3779         /* Load the Data area. */
3780         addr = be32_to_cpu(fw_entry->data.addr);
3781         len = be32_to_cpu(fw_entry->data.len);
3782         file_offset = be32_to_cpu(fw_entry->data.offset);
3783         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3784
3785         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3786         if (len) {
3787                 int j;
3788
3789                 for (j = 0; j < (len / 4); j++, offset += 4)
3790                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3791         }
3792
3793         /* Load the Read-Only area. */
3794         addr = be32_to_cpu(fw_entry->rodata.addr);
3795         len = be32_to_cpu(fw_entry->rodata.len);
3796         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3797         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3798
3799         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3800         if (len) {
3801                 int j;
3802
3803                 for (j = 0; j < (len / 4); j++, offset += 4)
3804                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3805         }
3806
3807         /* Clear the pre-fetch instruction. */
3808         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3809
3810         val = be32_to_cpu(fw_entry->start_addr);
3811         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3812
3813         /* Start the CPU. */
3814         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3815         val &= ~cpu_reg->mode_value_halt;
3816         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3817         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3818
3819         return 0;
3820 }
3821
3822 static int
3823 bnx2_init_cpus(struct bnx2 *bp)
3824 {
3825         const struct bnx2_mips_fw_file *mips_fw =
3826                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3827         const struct bnx2_rv2p_fw_file *rv2p_fw =
3828                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3829         int rc;
3830
3831         /* Initialize the RV2P processor. */
3832         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3833         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3834
3835         /* Initialize the RX Processor. */
3836         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3837         if (rc)
3838                 goto init_cpu_err;
3839
3840         /* Initialize the TX Processor. */
3841         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3842         if (rc)
3843                 goto init_cpu_err;
3844
3845         /* Initialize the TX Patch-up Processor. */
3846         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3847         if (rc)
3848                 goto init_cpu_err;
3849
3850         /* Initialize the Completion Processor. */
3851         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3852         if (rc)
3853                 goto init_cpu_err;
3854
3855         /* Initialize the Command Processor. */
3856         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3857
3858 init_cpu_err:
3859         return rc;
3860 }
3861
/* Transition the device between PCI power states D0 and D3hot,
 * programming the Wake-on-LAN configuration on the way down when
 * enabled.  Returns 0 on success or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear wake-up packet reception and disable magic
		 * packet mode now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force a low-speed advertisement on
			 * copper to reduce power, restoring the user
			 * settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast and multicast while asleep. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (with/without WOL). */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Program the D3hot state bits; 5706 A0/A1 only enter
		 * D3hot when WOL is enabled.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3999
/* Acquire the NVRAM hardware arbitration lock (arbiter slot 2),
 * polling until it is granted.  Returns 0 on success or -EBUSY on
 * timeout.  Must be paired with bnx2_release_nvram_lock().
 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;	/* arbitration granted */

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4021
/* Release the NVRAM hardware arbitration lock (arbiter slot 2) taken
 * by bnx2_acquire_nvram_lock(), polling until the grant bit clears.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;	/* grant bit cleared; lock released */

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4044
4045
/* Enable writes to the NVRAM.  Sets the PCI write-enable bit and, for
 * flash parts that require it, issues a WREN command and waits for it
 * to complete.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	/* Some flash parts additionally need an explicit write-enable
	 * (WREN) command sent to the device.
	 */
	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE, then issue WREN. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		/* Poll for command completion. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4074
4075 static void
4076 bnx2_disable_nvram_write(struct bnx2 *bp)
4077 {
4078         u32 val;
4079
4080         val = REG_RD(bp, BNX2_MISC_CFG);
4081         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4082 }
4083
4084
4085 static void
4086 bnx2_enable_nvram_access(struct bnx2 *bp)
4087 {
4088         u32 val;
4089
4090         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4091         /* Enable both bits, even on read. */
4092         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4093                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4094 }
4095
4096 static void
4097 bnx2_disable_nvram_access(struct bnx2 *bp)
4098 {
4099         u32 val;
4100
4101         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4102         /* Disable both bits, even after read. */
4103         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4104                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4105                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4106 }
4107
/* Erase the NVRAM page containing @offset.  A no-op for buffered flash
 * parts, which do not require erase-before-write.  Returns 0 on
 * success or -EBUSY on command timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4147
4148 static int
4149 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4150 {
4151         u32 cmd;
4152         int j;
4153
4154         /* Build the command word. */
4155         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4156
4157         /* Calculate an offset of a buffered flash, not needed for 5709. */
4158         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4159                 offset = ((offset / bp->flash_info->page_size) <<
4160                            bp->flash_info->page_bits) +
4161                           (offset % bp->flash_info->page_size);
4162         }
4163
4164         /* Need to clear DONE bit separately. */
4165         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4166
4167         /* Address of the NVRAM to read from. */
4168         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4169
4170         /* Issue a read command. */
4171         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4172
4173         /* Wait for completion. */
4174         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4175                 u32 val;
4176
4177                 udelay(5);
4178
4179                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4180                 if (val & BNX2_NVM_COMMAND_DONE) {
4181                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4182                         memcpy(ret_val, &v, 4);
4183                         break;
4184                 }
4185         }
4186         if (j >= NVRAM_TIMEOUT_COUNT)
4187                 return -EBUSY;
4188
4189         return 0;
4190 }
4191
4192
4193 static int
4194 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4195 {
4196         u32 cmd;
4197         __be32 val32;
4198         int j;
4199
4200         /* Build the command word. */
4201         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4202
4203         /* Calculate an offset of a buffered flash, not needed for 5709. */
4204         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4205                 offset = ((offset / bp->flash_info->page_size) <<
4206                           bp->flash_info->page_bits) +
4207                          (offset % bp->flash_info->page_size);
4208         }
4209
4210         /* Need to clear DONE bit separately. */
4211         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4212
4213         memcpy(&val32, val, 4);
4214
4215         /* Write the data. */
4216         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4217
4218         /* Address of the NVRAM to write to. */
4219         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4220
4221         /* Issue the write command. */
4222         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4223
4224         /* Wait for completion. */
4225         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4226                 udelay(5);
4227
4228                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4229                         break;
4230         }
4231         if (j >= NVRAM_TIMEOUT_COUNT)
4232                 return -EBUSY;
4233
4234         return 0;
4235 }
4236
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * For 5709 the part is fixed (flash_5709).  For older chips the
 * NVM_CFG1 strapping is matched against flash_table; if the interface
 * has not been reconfigured yet, the matching entry's config registers
 * are programmed under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc = 0;
        const struct flash_spec *flash;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->flash_info = &flash_5709;
                goto get_flash_size;
        }

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = ARRAY_SIZE(flash_table);

        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects the backup strapping mask. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* Neither loop found a matching part; j ran to entry_count. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                pr_alert("Unknown flash/EEPROM type\n");
                return -ENODEV;
        }

get_flash_size:
        /* Prefer the size advertised in shared memory; fall back to the
         * table's total_size if the firmware reports none. */
        val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
4319
4320 static int
4321 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4322                 int buf_size)
4323 {
4324         int rc = 0;
4325         u32 cmd_flags, offset32, len32, extra;
4326
4327         if (buf_size == 0)
4328                 return 0;
4329
4330         /* Request access to the flash interface. */
4331         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4332                 return rc;
4333
4334         /* Enable access to flash interface */
4335         bnx2_enable_nvram_access(bp);
4336
4337         len32 = buf_size;
4338         offset32 = offset;
4339         extra = 0;
4340
4341         cmd_flags = 0;
4342
4343         if (offset32 & 3) {
4344                 u8 buf[4];
4345                 u32 pre_len;
4346
4347                 offset32 &= ~3;
4348                 pre_len = 4 - (offset & 3);
4349
4350                 if (pre_len >= len32) {
4351                         pre_len = len32;
4352                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4353                                     BNX2_NVM_COMMAND_LAST;
4354                 }
4355                 else {
4356                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4357                 }
4358
4359                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4360
4361                 if (rc)
4362                         return rc;
4363
4364                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4365
4366                 offset32 += 4;
4367                 ret_buf += pre_len;
4368                 len32 -= pre_len;
4369         }
4370         if (len32 & 3) {
4371                 extra = 4 - (len32 & 3);
4372                 len32 = (len32 + 4) & ~3;
4373         }
4374
4375         if (len32 == 4) {
4376                 u8 buf[4];
4377
4378                 if (cmd_flags)
4379                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4380                 else
4381                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4382                                     BNX2_NVM_COMMAND_LAST;
4383
4384                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4385
4386                 memcpy(ret_buf, buf, 4 - extra);
4387         }
4388         else if (len32 > 0) {
4389                 u8 buf[4];
4390
4391                 /* Read the first word. */
4392                 if (cmd_flags)
4393                         cmd_flags = 0;
4394                 else
4395                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4396
4397                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4398
4399                 /* Advance to the next dword. */
4400                 offset32 += 4;
4401                 ret_buf += 4;
4402                 len32 -= 4;
4403
4404                 while (len32 > 4 && rc == 0) {
4405                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4406
4407                         /* Advance to the next dword. */
4408                         offset32 += 4;
4409                         ret_buf += 4;
4410                         len32 -= 4;
4411                 }
4412
4413                 if (rc)
4414                         return rc;
4415
4416                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4417                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4418
4419                 memcpy(ret_buf, buf, 4 - extra);
4420         }
4421
4422         /* Disable access to flash interface */
4423         bnx2_disable_nvram_access(bp);
4424
4425         bnx2_release_nvram_lock(bp);
4426
4427         return rc;
4428 }
4429
/* Write an arbitrary byte range to NVRAM.
 *
 * @offset and @buf_size need not be dword aligned: the surrounding
 * bytes of partially-covered dwords are first read back (into start[]
 * and end[]) and merged into a kmalloc'd aligned copy of the data.
 * For non-buffered flash, each touched page is read into flash_buffer,
 * erased, and rewritten with the preserved head/tail data around the
 * new bytes.  The NVRAM lock is acquired and released per page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or an error from
 * the lock/erase/read/write helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Unaligned start: read back the first partially-covered dword. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Unaligned end: read back the last partially-covered dword. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        /* Merge the preserved edge bytes with the caller's data into an
         * aligned scratch buffer. */
        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        /* Non-buffered flash needs a page-sized read-back buffer
         * (264 bytes covers the largest supported page). */
        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        /* Process one flash page per iteration. */
        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
4609
4610 static void
4611 bnx2_init_fw_cap(struct bnx2 *bp)
4612 {
4613         u32 val, sig = 0;
4614
4615         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4616         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4617
4618         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4619                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4620
4621         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4622         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4623                 return;
4624
4625         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4626                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4627                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4628         }
4629
4630         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4631             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4632                 u32 link;
4633
4634                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4635
4636                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4637                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4638                         bp->phy_port = PORT_FIBRE;
4639                 else
4640                         bp->phy_port = PORT_TP;
4641
4642                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4643                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4644         }
4645
4646         if (netif_running(bp->dev) && sig)
4647                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4648 }
4649
/* Map the MSI-X vector table and PBA through the PCI GRC windows.
 * The window mode must be switched to separate windows first, then
 * windows 2 and 3 are pointed at the table and PBA addresses.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4658
/* Perform a coordinated soft reset of the chip.
 *
 * Quiesces DMA, synchronizes with the bootcode firmware before and
 * after the reset, issues the chip-specific reset (MISC_COMMAND on
 * 5709, PCICFG_MISC_CONFIG on older parts), verifies endian config,
 * re-reads firmware capabilities, and applies 5706 A0 errata
 * workarounds.  @reset_code is the driver message code passed to the
 * firmware sync handshake.
 *
 * Returns 0 on success or a negative errno (-EBUSY on reset timeout,
 * -ENODEV on endian mismatch, or an error from bnx2_fw_sync /
 * bnx2_alloc_bad_rbuf).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709: dedicated software reset command; read back to
                 * flush the posted write before the settle delay. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        pr_err("Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                pr_err("Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;

        /* Re-read firmware capabilities under the PHY lock; if the
         * remote-PHY port type changed, reprogram the remote link. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        /* Reset cleared the GRC windows; restore the MSI-X mapping. */
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                bnx2_setup_msix_tbl(bp);

        return rc;
}
4764
/* Program the chip after a reset: DMA config, internal CPUs, MAC
 * address, MTU, status/statistics block DMA addresses, host coalescing
 * parameters (per-vector for MSI-X), and the receive filter; finally
 * tells the firmware the port is up and enables the default blocks.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val, mtu;
        int rc, i;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        if (bp->flags & BNX2_FLAG_PCIX) {
                u16 val16;

                /* Clear the enable-relaxed-ordering bit in PCI-X cmd. */
                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rc = bnx2_init_5709_context(bp);
                if (rc)
                        return rc;
        } else
                bnx2_init_context(bp);

        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
                if (CHIP_REV(bp) == CHIP_REV_Ax)
                        val |= BNX2_MQ_CONFIG_HALT_DIS;
        }

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the transmit backoff from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        mtu = bp->dev->mtu;
        val = mtu + ETH_HLEN + ETH_FCS_LEN;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        /* RBUF thresholds are sized for at least a 1500-byte MTU. */
        if (mtu < 1500)
                mtu = 1500;

        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

        /* Clear the status block and per-vector indices. */
        memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->bnx2_napi[i].last_status_idx = 0;

        bp->idle_chk_status_idx = 0xffff;

        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host coalescing trip points and tick timers (int value in the
         * high halfword, non-int value in the low halfword). */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        if (bp->flags & BNX2_FLAG_BROKEN_STATS)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                val = BNX2_HC_CONFIG_COLLECT_STATS;
        else {
                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
                       BNX2_HC_MSIX_BIT_VECTOR_VAL);

                val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
        }

        if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
                val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

        REG_WR(bp, BNX2_HC_CONFIG, val);

        /* Per-vector status block configuration (vector 0 uses the
         * global registers programmed above). */
        for (i = 1; i < bp->irq_nvecs; i++) {
                u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
                           BNX2_HC_SB_CONFIG_1;

                REG_WR(bp, base,
                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
                        (bp->tx_quick_cons_trip_int << 16) |
                         bp->tx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
                        (bp->tx_ticks_int << 16) | bp->tx_ticks);

                REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
                       (bp->rx_quick_cons_trip_int << 16) |
                        bp->rx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
                        (bp->rx_ticks_int << 16) | bp->rx_ticks);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        /* Tell the firmware initialization is complete. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          1, 0);

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
4989
4990 static void
4991 bnx2_clear_ring_states(struct bnx2 *bp)
4992 {
4993         struct bnx2_napi *bnapi;
4994         struct bnx2_tx_ring_info *txr;
4995         struct bnx2_rx_ring_info *rxr;
4996         int i;
4997
4998         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4999                 bnapi = &bp->bnx2_napi[i];
5000                 txr = &bnapi->tx_ring;
5001                 rxr = &bnapi->rx_ring;
5002
5003                 txr->tx_cons = 0;
5004                 txr->hw_tx_cons = 0;
5005                 rxr->rx_prod_bseq = 0;
5006                 rxr->rx_prod = 0;
5007                 rxr->rx_cons = 0;
5008                 rxr->rx_pg_prod = 0;
5009                 rxr->rx_pg_cons = 0;
5010         }
5011 }
5012
5013 static void
5014 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5015 {
5016         u32 val, offset0, offset1, offset2, offset3;
5017         u32 cid_addr = GET_CID_ADDR(cid);
5018
5019         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5020                 offset0 = BNX2_L2CTX_TYPE_XI;
5021                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5022                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5023                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5024         } else {
5025                 offset0 = BNX2_L2CTX_TYPE;
5026                 offset1 = BNX2_L2CTX_CMD_TYPE;
5027                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5028                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5029         }
5030         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5031         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5032
5033         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5034         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5035
5036         val = (u64) txr->tx_desc_mapping >> 32;
5037         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5038
5039         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5040         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5041 }
5042
5043 static void
5044 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5045 {
5046         struct tx_bd *txbd;
5047         u32 cid = TX_CID;
5048         struct bnx2_napi *bnapi;
5049         struct bnx2_tx_ring_info *txr;
5050
5051         bnapi = &bp->bnx2_napi[ring_num];
5052         txr = &bnapi->tx_ring;
5053
5054         if (ring_num == 0)
5055                 cid = TX_CID;
5056         else
5057                 cid = TX_TSS_CID + ring_num - 1;
5058
5059         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5060
5061         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5062
5063         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5064         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5065
5066         txr->tx_prod = 0;
5067         txr->tx_prod_bseq = 0;
5068
5069         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5070         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5071
5072         bnx2_init_tx_context(bp, cid, txr);
5073 }
5074
5075 static void
5076 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5077                      int num_rings)
5078 {
5079         int i;
5080         struct rx_bd *rxbd;
5081
5082         for (i = 0; i < num_rings; i++) {
5083                 int j;
5084
5085                 rxbd = &rx_ring[i][0];
5086                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5087                         rxbd->rx_bd_len = buf_size;
5088                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5089                 }
5090                 if (i == (num_rings - 1))
5091                         j = 0;
5092                 else
5093                         j = i + 1;
5094                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5095                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5096         }
5097 }
5098
/* Bring up one RX ring: build the BD chains, program the chip's RX
 * context (including the optional page ring used for jumbo frames),
 * pre-fill the ring with buffers, and publish the initial producer
 * indices to the chip's mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX context; RSS rings get consecutive IDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring; overwritten below when jumbo paging is on. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Page ring BD chain base address, high word then low. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Normal RX BD chain base address. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early (with a warning) on OOM. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with skbs, same early-stop policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used to publish producer updates at runtime. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip about the buffers just posted. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5184
/* Initialize every TX and RX ring after a chip reset, and when more
 * than one RX ring is enabled, program the RSS indirection table and
 * hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)initialized. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)initialized. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table one byte per entry, flushing
		 * a packed word every 4 entries; each byte selects one of
		 * the non-default RX rings in round-robin order.
		 * NOTE(review): bytes are packed through tbl[] and then
		 * swapped with cpu_to_be32() before the indirect write --
		 * presumably to match the RXP firmware's byte order;
		 * verify on a big-endian host.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Hash on all supported IPv4 and IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5229
5230 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5231 {
5232         u32 max, num_rings = 1;
5233
5234         while (ring_size > MAX_RX_DESC_CNT) {
5235                 ring_size -= MAX_RX_DESC_CNT;
5236                 num_rings++;
5237         }
5238         /* round to next power of 2 */
5239         max = max_size;
5240         while ((max & num_rings) == 0)
5241                 max >>= 1;
5242
5243         if (num_rings != max)
5244                 max <<= 1;
5245
5246         return max;
5247 }
5248
5249 static void
5250 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5251 {
5252         u32 rx_size, rx_space, jumbo_size;
5253
5254         /* 8 for CRC and VLAN */
5255         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5256
5257         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5258                 sizeof(struct skb_shared_info);
5259
5260         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5261         bp->rx_pg_ring_size = 0;
5262         bp->rx_max_pg_ring = 0;
5263         bp->rx_max_pg_ring_idx = 0;
5264         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5265                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5266
5267                 jumbo_size = size * pages;
5268                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5269                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5270
5271                 bp->rx_pg_ring_size = jumbo_size;
5272                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5273                                                         MAX_RX_PG_RINGS);
5274                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5275                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5276                 bp->rx_copy_thresh = 0;
5277         }
5278
5279         bp->rx_buf_use_size = rx_size;
5280         /* hw alignment */
5281         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5282         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5283         bp->rx_ring_size = size;
5284         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5285         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5286 }
5287
5288 static void
5289 bnx2_free_tx_skbs(struct bnx2 *bp)
5290 {
5291         int i;
5292
5293         for (i = 0; i < bp->num_tx_rings; i++) {
5294                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5295                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5296                 int j;
5297
5298                 if (txr->tx_buf_ring == NULL)
5299                         continue;
5300
5301                 for (j = 0; j < TX_DESC_CNT; ) {
5302                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5303                         struct sk_buff *skb = tx_buf->skb;
5304                         int k, last;
5305
5306                         if (skb == NULL) {
5307                                 j++;
5308                                 continue;
5309                         }
5310
5311                         pci_unmap_single(bp->pdev,
5312                                          pci_unmap_addr(tx_buf, mapping),
5313                                          skb_headlen(skb),
5314                                          PCI_DMA_TODEVICE);
5315
5316                         tx_buf->skb = NULL;
5317
5318                         last = tx_buf->nr_frags;
5319                         j++;
5320                         for (k = 0; k < last; k++, j++) {
5321                                 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5322                                 pci_unmap_page(bp->pdev,
5323                                         pci_unmap_addr(tx_buf, mapping),
5324                                         skb_shinfo(skb)->frags[k].size,
5325                                         PCI_DMA_TODEVICE);
5326                         }
5327                         dev_kfree_skb(skb);
5328                 }
5329         }
5330 }
5331
5332 static void
5333 bnx2_free_rx_skbs(struct bnx2 *bp)
5334 {
5335         int i;
5336
5337         for (i = 0; i < bp->num_rx_rings; i++) {
5338                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5339                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5340                 int j;
5341
5342                 if (rxr->rx_buf_ring == NULL)
5343                         return;
5344
5345                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5346                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5347                         struct sk_buff *skb = rx_buf->skb;
5348
5349                         if (skb == NULL)
5350                                 continue;
5351
5352                         pci_unmap_single(bp->pdev,
5353                                          pci_unmap_addr(rx_buf, mapping),
5354                                          bp->rx_buf_use_size,
5355                                          PCI_DMA_FROMDEVICE);
5356
5357                         rx_buf->skb = NULL;
5358
5359                         dev_kfree_skb(skb);
5360                 }
5361                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5362                         bnx2_free_rx_page(bp, rxr, j);
5363         }
5364 }
5365
/* Release all TX and RX buffers; used while the device is quiesced
 * (reset, shutdown, ring reconfiguration).
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5372
5373 static int
5374 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5375 {
5376         int rc;
5377
5378         rc = bnx2_reset_chip(bp, reset_code);
5379         bnx2_free_skbs(bp);
5380         if (rc)
5381                 return rc;
5382
5383         if ((rc = bnx2_init_chip(bp)) != 0)
5384                 return rc;
5385
5386         bnx2_init_all_rings(bp);
5387         return 0;
5388 }
5389
5390 static int
5391 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5392 {
5393         int rc;
5394
5395         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5396                 return rc;
5397
5398         spin_lock_bh(&bp->phy_lock);
5399         bnx2_init_phy(bp, reset_phy);
5400         bnx2_set_link(bp);
5401         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5402                 bnx2_remote_phy_event(bp);
5403         spin_unlock_bh(&bp->phy_lock);
5404         return 0;
5405 }
5406
5407 static int
5408 bnx2_shutdown_chip(struct bnx2 *bp)
5409 {
5410         u32 reset_code;
5411
5412         if (bp->flags & BNX2_FLAG_NO_WOL)
5413                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5414         else if (bp->wol)
5415                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5416         else
5417                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5418
5419         return bnx2_reset_chip(bp, reset_code);
5420 }
5421
/* Ethtool register self-test.  For each table entry, verify that the
 * read/write bits (rw_mask) can be cleared and set and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in every case.  Returns 0 on success or
 * -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register offset; flags: BNX2_FL_NOT_5709 marks entries
	 * skipped on the 5709; rw_mask: bits expected to be writable;
	 * ro_mask: bits expected to be read-only.  offset 0xffff ends
	 * the table.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* After writing 0, every writable bit must read back 0 and
		 * every read-only bit must keep its saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* After writing all 1s, every writable bit must read back 1
		 * and every read-only bit must still keep its saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5592
5593 static int
5594 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5595 {
5596         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5597                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5598         int i;
5599
5600         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5601                 u32 offset;
5602
5603                 for (offset = 0; offset < size; offset += 4) {
5604
5605                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5606
5607                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5608                                 test_pattern[i]) {
5609                                 return -ENODEV;
5610                         }
5611                 }
5612         }
5613         return 0;
5614 }
5615
5616 static int
5617 bnx2_test_memory(struct bnx2 *bp)
5618 {
5619         int ret = 0;
5620         int i;
5621         static struct mem_entry {
5622                 u32   offset;
5623                 u32   len;
5624         } mem_tbl_5706[] = {
5625                 { 0x60000,  0x4000 },
5626                 { 0xa0000,  0x3000 },
5627                 { 0xe0000,  0x4000 },
5628                 { 0x120000, 0x4000 },
5629                 { 0x1a0000, 0x4000 },
5630                 { 0x160000, 0x4000 },
5631                 { 0xffffffff, 0    },
5632         },
5633         mem_tbl_5709[] = {
5634                 { 0x60000,  0x4000 },
5635                 { 0xa0000,  0x3000 },
5636                 { 0xe0000,  0x4000 },
5637                 { 0x120000, 0x4000 },
5638                 { 0x1a0000, 0x4000 },
5639                 { 0xffffffff, 0    },
5640         };
5641         struct mem_entry *mem_tbl;
5642
5643         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5644                 mem_tbl = mem_tbl_5709;
5645         else
5646                 mem_tbl = mem_tbl_5706;
5647
5648         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5649                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5650                         mem_tbl[i].len)) != 0) {
5651                         return ret;
5652                 }
5653         }
5654
5655         return ret;
5656 }
5657
5658 #define BNX2_MAC_LOOPBACK       0
5659 #define BNX2_PHY_LOOPBACK       1
5660
/* Send one self-addressed test packet through the MAC or PHY loopback
 * path and verify that it comes back intact.  Returns 0 on success,
 * -EINVAL for an unknown loopback mode, -ENOMEM/-EIO on setup failure,
 * and -ENODEV when the packet is not received or is corrupted.
 * Must be called with the device quiesced (interrupts not processing
 * these rings); the chip is polled via forced coalescing.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always uses vector 0's rings; these reassignments are
	 * redundant with the initializers above. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a frame addressed to ourselves: 6-byte DA = own MAC,
	 * zeroed SA/type, then a counting byte pattern for the payload. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalescing pass so the RX consumer index is current
	 * before we record the starting point. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single TX BD for the whole packet. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell: publish producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second forced coalescing pass to pick up TX/RX completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been fully transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts new RX completions must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Received length (minus 4-byte CRC) must match what was sent. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5792
5793 #define BNX2_MAC_LOOPBACK_FAILED        1
5794 #define BNX2_PHY_LOOPBACK_FAILED        2
5795 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5796                                          BNX2_PHY_LOOPBACK_FAILED)
5797
5798 static int
5799 bnx2_test_loopback(struct bnx2 *bp)
5800 {
5801         int rc = 0;
5802
5803         if (!netif_running(bp->dev))
5804                 return BNX2_LOOPBACK_FAILED;
5805
5806         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5807         spin_lock_bh(&bp->phy_lock);
5808         bnx2_init_phy(bp, 1);
5809         spin_unlock_bh(&bp->phy_lock);
5810         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5811                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5812         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5813                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5814         return rc;
5815 }
5816
5817 #define NVRAM_SIZE 0x200
5818 #define CRC32_RESIDUAL 0xdebb20e3
5819
5820 static int
5821 bnx2_test_nvram(struct bnx2 *bp)
5822 {
5823         __be32 buf[NVRAM_SIZE / 4];
5824         u8 *data = (u8 *) buf;
5825         int rc = 0;
5826         u32 magic, csum;
5827
5828         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5829                 goto test_nvram_done;
5830
5831         magic = be32_to_cpu(buf[0]);
5832         if (magic != 0x669955aa) {
5833                 rc = -ENODEV;
5834                 goto test_nvram_done;
5835         }
5836
5837         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5838                 goto test_nvram_done;
5839
5840         csum = ether_crc_le(0x100, data);
5841         if (csum != CRC32_RESIDUAL) {
5842                 rc = -ENODEV;
5843                 goto test_nvram_done;
5844         }
5845
5846         csum = ether_crc_le(0x100, data + 0x100);
5847         if (csum != CRC32_RESIDUAL) {
5848                 rc = -ENODEV;
5849         }
5850
5851 test_nvram_done:
5852         return rc;
5853 }
5854
5855 static int
5856 bnx2_test_link(struct bnx2 *bp)
5857 {
5858         u32 bmsr;
5859
5860         if (!netif_running(bp->dev))
5861                 return -ENODEV;
5862
5863         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5864                 if (bp->link_up)
5865                         return 0;
5866                 return -ENODEV;
5867         }
5868         spin_lock_bh(&bp->phy_lock);
5869         bnx2_enable_bmsr1(bp);
5870         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5871         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5872         bnx2_disable_bmsr1(bp);
5873         spin_unlock_bh(&bp->phy_lock);
5874
5875         if (bmsr & BMSR_LSTATUS) {
5876                 return 0;
5877         }
5878         return -ENODEV;
5879 }
5880
5881 static int
5882 bnx2_test_intr(struct bnx2 *bp)
5883 {
5884         int i;
5885         u16 status_idx;
5886
5887         if (!netif_running(bp->dev))
5888                 return -ENODEV;
5889
5890         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5891
5892         /* This register is not touched during run-time. */
5893         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5894         REG_RD(bp, BNX2_HC_COMMAND);
5895
5896         for (i = 0; i < 10; i++) {
5897                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5898                         status_idx) {
5899
5900                         break;
5901                 }
5902
5903                 msleep_interruptible(10);
5904         }
5905         if (i < 10)
5906                 return 0;
5907
5908         return -ENODEV;
5909 }
5910
/* Determining link for parallel detection. */
/* Returns 1 if the 5706 SerDes PHY debug registers indicate a usable
 * link partner for parallel detection, 0 otherwise.  Caller holds
 * bp->phy_lock (invoked from bnx2_5706_serdes_timer()). */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detect -> no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice and use the second value — presumably to discard a
	 * stale/latched reading; TODO(review) confirm against PHY docs. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5942
/* Periodic SerDes link state machine for the 5706, called from
 * bnx2_timer().  Handles parallel detection (forcing 1G full duplex
 * when the partner does not autonegotiate) and recovery back to
 * autoneg, then re-checks link sync. */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg restart is still settling; skip this tick. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg enabled but no link: if the partner has
			 * signal without autoneg, force 1G full duplex
			 * (parallel detection). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection; if the partner now
		 * advertises autoneg (bit tested below), switch back. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double-read of the AN debug shadow register; the second
		 * value is the one used. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but PHY lost sync: force the
			 * link down once, then let bnx2_set_link() resolve
			 * the state on subsequent ticks. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6004
/* Periodic SerDes link state machine for the 5708, called from
 * bnx2_timer().  Alternates between autoneg and forced 2.5G on
 * 2.5G-capable parts until link comes up. */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Link is handled by the remote firmware PHY; nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* A previous restart is still settling; skip this tick. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not produced a link; try forced 2.5G
			 * with a shorter timer interval. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G failed too; go back to autoneg and
			 * allow two ticks for it to settle. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6037
/* Driver heartbeat timer.  Sends the firmware heartbeat, applies the
 * broken-stats workaround, runs the SerDes link state machines and
 * rearms itself every bp->current_interval jiffies. */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. reset in progress); just rearm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot MSI) needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Firmware-maintained rx drop count, mirrored into the stats block. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6073
6074 static int
6075 bnx2_request_irq(struct bnx2 *bp)
6076 {
6077         unsigned long flags;
6078         struct bnx2_irq *irq;
6079         int rc = 0, i;
6080
6081         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6082                 flags = 0;
6083         else
6084                 flags = IRQF_SHARED;
6085
6086         for (i = 0; i < bp->irq_nvecs; i++) {
6087                 irq = &bp->irq_tbl[i];
6088                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6089                                  &bp->bnx2_napi[i]);
6090                 if (rc)
6091                         break;
6092                 irq->requested = 1;
6093         }
6094         return rc;
6095 }
6096
6097 static void
6098 bnx2_free_irq(struct bnx2 *bp)
6099 {
6100         struct bnx2_irq *irq;
6101         int i;
6102
6103         for (i = 0; i < bp->irq_nvecs; i++) {
6104                 irq = &bp->irq_tbl[i];
6105                 if (irq->requested)
6106                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6107                 irq->requested = 0;
6108         }
6109         if (bp->flags & BNX2_FLAG_USING_MSI)
6110                 pci_disable_msi(bp->pdev);
6111         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6112                 pci_disable_msix(bp->pdev);
6113
6114         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6115 }
6116
/* Try to switch the device to MSI-X with msix_vecs vectors.  On
 * success, bp->flags/irq_nvecs/irq_tbl are updated; on failure the
 * device is left in its previous interrupt mode. */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the chip's MSI-X table/PBA windows before enabling. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		/* Name each vector "<ifname>-<index>" for /proc/interrupts. */
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6151
/* Pick the interrupt mode (MSI-X, MSI or legacy INTx), fill in
 * bp->irq_tbl, and derive the tx/rx ring counts from the number of
 * vectors obtained.  dis_msi forces legacy INTx. */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the rx ring limit. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default: a single legacy INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI if MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 uses the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Tx ring count must be a power of two. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6185
/* Called with rtnl_lock */
/* net_device open hook: power up the chip, allocate rings, request
 * IRQs, initialize the NIC and start the tx queues.  If MSI is in
 * use, an interrupt self-test is run and on failure the device is
 * reconfigured for INTx.  Returns 0 or a negative errno; on error
 * all resources acquired here are released. */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1: force legacy INTx. */
			bnx2_setup_int_mode(bp, 1);

			/* Re-init without a full chip reset. */
			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything acquired above. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6260
6261 static void
6262 bnx2_reset_task(struct work_struct *work)
6263 {
6264         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6265
6266         rtnl_lock();
6267         if (!netif_running(bp->dev)) {
6268                 rtnl_unlock();
6269                 return;
6270         }
6271
6272         bnx2_netif_stop(bp);
6273
6274         bnx2_init_nic(bp, 1);
6275
6276         atomic_set(&bp->intr_sem, 1);
6277         bnx2_netif_start(bp);
6278         rtnl_unlock();
6279 }
6280
/* Log a snapshot of chip/firmware state registers to aid debugging
 * of tx timeouts (called from bnx2_tx_timeout()). */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The pending-bit array window is only meaningful under MSI-X. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6299
/* netdev watchdog hook: dump chip state for diagnosis, then schedule
 * a full reset from process context. */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6310
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN group registration hook.  The netif is stopped around the
 * vlgrp update so the rx path never sees a half-installed group;
 * the rx mode and firmware VLAN setting are then refreshed. */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;

	/* Device is down: just record the group, nothing to reprogram. */
	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6333
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Transmit entry point.  Maps the skb head and fragments for DMA,
 * builds one tx buffer descriptor per piece (with checksum/VLAN/LSO
 * flags), then rings the doorbell registers.  On a DMA mapping error
 * everything already mapped is unwound and the skb is dropped
 * (NETDEV_TX_OK — the packet is consumed either way except when the
 * ring is unexpectedly full). */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Need one BD for the head plus one per fragment. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	/* VLAN tag goes in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		/* LSO: encode MSS, TCP option length and (for IPv6) the
		 * transport-header offset into the BD fields. */
		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Offset is carried in 8-byte units, split
				 * across several BD bit fields. */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD carries the skb head and the START flag. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(bp->pdev, mapping))
			goto dma_error;
		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		/* Re-check after stopping to close the race with tx
		 * completion freeing ring space. */
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6500
/* Called with rtnl_lock */
/* net_device stop hook: quiesce interrupts, NAPI and the timer,
 * shut down the chip, release IRQs and memory, then drop the device
 * to D3hot.  Always returns 0. */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure a queued reset_task cannot run concurrently with
	 * (or after) this teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6521
6522 static void
6523 bnx2_save_stats(struct bnx2 *bp)
6524 {
6525         u32 *hw_stats = (u32 *) bp->stats_blk;
6526         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6527         int i;
6528
6529         /* The 1st 10 counters are 64-bit counters */
6530         for (i = 0; i < 20; i += 2) {
6531                 u32 hi;
6532                 u64 lo;
6533
6534                 hi = temp_stats[i] + hw_stats[i];
6535                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6536                 if (lo > 0xffffffff)
6537                         hi++;
6538                 temp_stats[i] = hi;
6539                 temp_stats[i + 1] = lo & 0xffffffff;
6540         }
6541
6542         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6543                 temp_stats[i] += hw_stats[i];
6544 }
6545
/* Stats accessor macros used by bnx2_get_stats().  They expand
 * references to a local variable named `bp` — only usable inside
 * functions that declare one.  `ctr` is the base counter name; the
 * hardware splits 64-bit counters into <ctr>_hi / <ctr>_lo words. */

/* Combine the hi/lo words of a 64-bit counter (64-bit longs only). */
#define GET_64BIT_NET_STATS64(ctr)                              \
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
	(unsigned long) (ctr##_lo)

/* On 32-bit kernels, report only the low word of a 64-bit counter. */
#define GET_64BIT_NET_STATS32(ctr)                              \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)                                \
	GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
	GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

/* 32-bit counter: live hardware value plus the saved accumulator. */
#define GET_32BIT_NET_STATS(ctr)                                \
	(unsigned long) (bp->stats_blk->ctr +                   \
			 bp->temp_stats_blk->ctr)
6566
/* ndo_get_stats hook: fold the hardware statistics block (plus the
 * accumulator saved across resets — see bnx2_save_stats()) into the
 * generic net_device_stats counters. */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct net_device_stats *net_stats = &dev->stats;

	/* Stats block not allocated (device never brought up): return
	 * whatever is already in dev->stats. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on these chip revs. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6640
6641 /* All ethtool functions called with rtnl_lock */
6642
/* ethtool get_settings hook: report supported/advertised modes, the
 * active port type and — when carrier is up — the negotiated speed
 * and duplex (-1 for both when the link is down). */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote-PHY-capable device can drive either media type. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link fields read below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* Link down: speed/duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6701
/* ethtool set_settings handler: validate the requested link settings,
 * commit them, and re-run PHY setup if the device is up.  Called with
 * rtnl_lock held.  Returns 0 or -EINVAL / setup_phy error.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed unless every
	 * validation check below passes.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the port is only allowed on remote-PHY devices. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Restrict the advertised modes to the selected medium;
		 * fall back to the full set if the mask comes up empty.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: fibre accepts only 1G/2.5G full duplex
		 * (2.5G only when the PHY is capable); 1G/2.5G cannot be
		 * forced on the copper port.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed; commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6779
6780 static void
6781 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6782 {
6783         struct bnx2 *bp = netdev_priv(dev);
6784
6785         strcpy(info->driver, DRV_MODULE_NAME);
6786         strcpy(info->version, DRV_MODULE_VERSION);
6787         strcpy(info->bus_info, pci_name(bp->pdev));
6788         strcpy(info->fw_version, bp->fw_version);
6789 }
6790
6791 #define BNX2_REGDUMP_LEN                (32 * 1024)
6792
6793 static int
6794 bnx2_get_regs_len(struct net_device *dev)
6795 {
6796         return BNX2_REGDUMP_LEN;
6797 }
6798
6799 static void
6800 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6801 {
6802         u32 *p = _p, i, offset;
6803         u8 *orig_p = _p;
6804         struct bnx2 *bp = netdev_priv(dev);
6805         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6806                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6807                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6808                                  0x1040, 0x1048, 0x1080, 0x10a4,
6809                                  0x1400, 0x1490, 0x1498, 0x14f0,
6810                                  0x1500, 0x155c, 0x1580, 0x15dc,
6811                                  0x1600, 0x1658, 0x1680, 0x16d8,
6812                                  0x1800, 0x1820, 0x1840, 0x1854,
6813                                  0x1880, 0x1894, 0x1900, 0x1984,
6814                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6815                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6816                                  0x2000, 0x2030, 0x23c0, 0x2400,
6817                                  0x2800, 0x2820, 0x2830, 0x2850,
6818                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6819                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6820                                  0x4080, 0x4090, 0x43c0, 0x4458,
6821                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6822                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6823                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6824                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6825                                  0x6800, 0x6848, 0x684c, 0x6860,
6826                                  0x6888, 0x6910, 0x8000 };
6827
6828         regs->version = 0;
6829
6830         memset(p, 0, BNX2_REGDUMP_LEN);
6831
6832         if (!netif_running(bp->dev))
6833                 return;
6834
6835         i = 0;
6836         offset = reg_boundaries[0];
6837         p += offset;
6838         while (offset < BNX2_REGDUMP_LEN) {
6839                 *p++ = REG_RD(bp, offset);
6840                 offset += 4;
6841                 if (offset == reg_boundaries[i + 1]) {
6842                         offset = reg_boundaries[i + 2];
6843                         p = (u32 *) (orig_p + offset);
6844                         i += 2;
6845                 }
6846         }
6847 }
6848
6849 static void
6850 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6851 {
6852         struct bnx2 *bp = netdev_priv(dev);
6853
6854         if (bp->flags & BNX2_FLAG_NO_WOL) {
6855                 wol->supported = 0;
6856                 wol->wolopts = 0;
6857         }
6858         else {
6859                 wol->supported = WAKE_MAGIC;
6860                 if (bp->wol)
6861                         wol->wolopts = WAKE_MAGIC;
6862                 else
6863                         wol->wolopts = 0;
6864         }
6865         memset(&wol->sopass, 0, sizeof(wol->sopass));
6866 }
6867
6868 static int
6869 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6870 {
6871         struct bnx2 *bp = netdev_priv(dev);
6872
6873         if (wol->wolopts & ~WAKE_MAGIC)
6874                 return -EINVAL;
6875
6876         if (wol->wolopts & WAKE_MAGIC) {
6877                 if (bp->flags & BNX2_FLAG_NO_WOL)
6878                         return -EINVAL;
6879
6880                 bp->wol = 1;
6881         }
6882         else {
6883                 bp->wol = 0;
6884         }
6885         return 0;
6886 }
6887
/* ethtool nway_reset handler: restart autonegotiation.  Requires the
 * interface to be up and autoneg to be enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY setups delegate the restart to the remote-PHY
	 * configuration path.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		/* Drop the lock while sleeping; give the peer time to
		 * notice the loopback-induced link drop.
		 */
		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6933
6934 static u32
6935 bnx2_get_link(struct net_device *dev)
6936 {
6937         struct bnx2 *bp = netdev_priv(dev);
6938
6939         return bp->link_up;
6940 }
6941
6942 static int
6943 bnx2_get_eeprom_len(struct net_device *dev)
6944 {
6945         struct bnx2 *bp = netdev_priv(dev);
6946
6947         if (bp->flash_info == NULL)
6948                 return 0;
6949
6950         return (int) bp->flash_size;
6951 }
6952
6953 static int
6954 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6955                 u8 *eebuf)
6956 {
6957         struct bnx2 *bp = netdev_priv(dev);
6958         int rc;
6959
6960         if (!netif_running(dev))
6961                 return -EAGAIN;
6962
6963         /* parameters already validated in ethtool_get_eeprom */
6964
6965         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6966
6967         return rc;
6968 }
6969
6970 static int
6971 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6972                 u8 *eebuf)
6973 {
6974         struct bnx2 *bp = netdev_priv(dev);
6975         int rc;
6976
6977         if (!netif_running(dev))
6978                 return -EAGAIN;
6979
6980         /* parameters already validated in ethtool_set_eeprom */
6981
6982         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6983
6984         return rc;
6985 }
6986
6987 static int
6988 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6989 {
6990         struct bnx2 *bp = netdev_priv(dev);
6991
6992         memset(coal, 0, sizeof(struct ethtool_coalesce));
6993
6994         coal->rx_coalesce_usecs = bp->rx_ticks;
6995         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6996         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6997         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6998
6999         coal->tx_coalesce_usecs = bp->tx_ticks;
7000         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7001         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7002         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7003
7004         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7005
7006         return 0;
7007 }
7008
7009 static int
7010 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7011 {
7012         struct bnx2 *bp = netdev_priv(dev);
7013
7014         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7015         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7016
7017         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7018         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7019
7020         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7021         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7022
7023         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7024         if (bp->rx_quick_cons_trip_int > 0xff)
7025                 bp->rx_quick_cons_trip_int = 0xff;
7026
7027         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7028         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7029
7030         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7031         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7032
7033         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7034         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7035
7036         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7037         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7038                 0xff;
7039
7040         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7041         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7042                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7043                         bp->stats_ticks = USEC_PER_SEC;
7044         }
7045         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7046                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7047         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7048
7049         if (netif_running(bp->dev)) {
7050                 bnx2_netif_stop(bp);
7051                 bnx2_init_nic(bp, 0);
7052                 bnx2_netif_start(bp);
7053         }
7054
7055         return 0;
7056 }
7057
7058 static void
7059 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7060 {
7061         struct bnx2 *bp = netdev_priv(dev);
7062
7063         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7064         ering->rx_mini_max_pending = 0;
7065         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7066
7067         ering->rx_pending = bp->rx_ring_size;
7068         ering->rx_mini_pending = 0;
7069         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7070
7071         ering->tx_max_pending = MAX_TX_DESC_CNT;
7072         ering->tx_pending = bp->tx_ring_size;
7073 }
7074
/* Tear down, resize, and rebuild the RX/TX rings.  Returns 0 or a
 * negative errno; if re-allocation or re-initialization fails while
 * the device is up, the device is closed.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* NOTE(review): presumably NAPI was disabled by
			 * bnx2_netif_stop() above and must be re-enabled
			 * before dev_close() — confirm.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp);
	}
	return 0;
}
7114
7115 static int
7116 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7117 {
7118         struct bnx2 *bp = netdev_priv(dev);
7119         int rc;
7120
7121         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7122                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7123                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7124
7125                 return -EINVAL;
7126         }
7127         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7128         return rc;
7129 }
7130
7131 static void
7132 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7133 {
7134         struct bnx2 *bp = netdev_priv(dev);
7135
7136         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7137         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7138         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7139 }
7140
7141 static int
7142 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7143 {
7144         struct bnx2 *bp = netdev_priv(dev);
7145
7146         bp->req_flow_ctrl = 0;
7147         if (epause->rx_pause)
7148                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7149         if (epause->tx_pause)
7150                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7151
7152         if (epause->autoneg) {
7153                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7154         }
7155         else {
7156                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7157         }
7158
7159         if (netif_running(dev)) {
7160                 spin_lock_bh(&bp->phy_lock);
7161                 bnx2_setup_phy(bp, bp->phy_port);
7162                 spin_unlock_bh(&bp->phy_lock);
7163         }
7164
7165         return 0;
7166 }
7167
7168 static u32
7169 bnx2_get_rx_csum(struct net_device *dev)
7170 {
7171         struct bnx2 *bp = netdev_priv(dev);
7172
7173         return bp->rx_csum;
7174 }
7175
7176 static int
7177 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7178 {
7179         struct bnx2 *bp = netdev_priv(dev);
7180
7181         bp->rx_csum = data;
7182         return 0;
7183 }
7184
7185 static int
7186 bnx2_set_tso(struct net_device *dev, u32 data)
7187 {
7188         struct bnx2 *bp = netdev_priv(dev);
7189
7190         if (data) {
7191                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7192                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7193                         dev->features |= NETIF_F_TSO6;
7194         } else
7195                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7196                                    NETIF_F_TSO_ECN);
7197         return 0;
7198 }
7199
/* Names reported for ETH_SS_STATS.  The order must match
 * bnx2_stats_offset_arr and the per-chip stats length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7251
7252 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7253                         sizeof(bnx2_stats_str_arr[0]))
7254
7255 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7256
/* 32-bit word offset into struct statistics_block for each counter in
 * bnx2_stats_str_arr, in the same order.  64-bit counters point at
 * their _hi word; the _lo word follows at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7306
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Per-counter width in bytes for 5706 A0-A2 / 5708 A0: 0 = counter
 * skipped, 4 = 32-bit counter, 8 = 64-bit counter.  Indexed in
 * bnx2_stats_str_arr order.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7317
/* Per-counter width in bytes for all other chips: 0 = skipped,
 * 4 = 32-bit, 8 = 64-bit.  Only stat_IfHCInBadOctets is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7325
7326 #define BNX2_NUM_TESTS 6
7327
/* Names reported for ETH_SS_TEST; order matches the buf[] indices
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7338
7339 static int
7340 bnx2_get_sset_count(struct net_device *dev, int sset)
7341 {
7342         switch (sset) {
7343         case ETH_SS_TEST:
7344                 return BNX2_NUM_TESTS;
7345         case ETH_SS_STATS:
7346                 return BNX2_NUM_STATS;
7347         default:
7348                 return -EOPNOTSUPP;
7349         }
7350 }
7351
/* ethtool self_test handler.  buf[0..5] correspond to the entries in
 * bnx2_tests_str_arr: non-zero means failed (buf[2] holds the raw
 * loopback result).  Offline tests reset the chip.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the chip is powered up before touching it. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access: stop traffic and
		 * put the chip into diagnostic mode.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diagnostic reset. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to 7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Drop back to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7410
7411 static void
7412 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7413 {
7414         switch (stringset) {
7415         case ETH_SS_STATS:
7416                 memcpy(buf, bnx2_stats_str_arr,
7417                         sizeof(bnx2_stats_str_arr));
7418                 break;
7419         case ETH_SS_TEST:
7420                 memcpy(buf, bnx2_tests_str_arr,
7421                         sizeof(bnx2_tests_str_arr));
7422                 break;
7423         }
7424 }
7425
/* ethtool get_ethtool_stats handler: fill buf[] with BNX2_NUM_STATS
 * counters in bnx2_stats_str_arr order, adding the counters preserved
 * across chip resets (temp_stats_blk) to the live hardware block.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No statistics block allocated: report all zeroes. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 steppings use a table that skips counters
	 * affected by errata (see comment above the length arrays).
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: high 32-bit word at offset, low word
		 * at offset + 1, for both the live and saved blocks.
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7472
/* ethtool phys_id handler: blink the port LED so the device can be
 * physically identified.  data = duration in seconds (0 means 2).
 * Restores the original LED configuration before returning.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	/* Take direct control of the LED, remembering the old mode so
	 * it can be restored afterwards.
	 */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Alternate between two LED override patterns every 500 ms;
	 * 2 iterations per requested second.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop early if the sleeping task was signalled. */
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	/* Return to low power if the interface is down. */
	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7512
7513 static int
7514 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7515 {
7516         struct bnx2 *bp = netdev_priv(dev);
7517
7518         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7519                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7520         else
7521                 return (ethtool_op_set_tx_csum(dev, data));
7522 }
7523
/* ethtool entry points for this driver; all are invoked with
 * rtnl_lock held (see comment above bnx2_get_settings).
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7554
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register access goes through bnx2_read_phy()/bnx2_write_phy()
 * under bp->phy_lock.  Devices managed by a remote PHY do not allow
 * direct MII access and return -EOPNOTSUPP; register reads/writes
 * require the interface to be up (-EAGAIN otherwise).
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize against other PHY users. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7605
7606 /* Called with rtnl_lock */
7607 static int
7608 bnx2_change_mac_addr(struct net_device *dev, void *p)
7609 {
7610         struct sockaddr *addr = p;
7611         struct bnx2 *bp = netdev_priv(dev);
7612
7613         if (!is_valid_ether_addr(addr->sa_data))
7614                 return -EINVAL;
7615
7616         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7617         if (netif_running(dev))
7618                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7619
7620         return 0;
7621 }
7622
7623 /* Called with rtnl_lock */
7624 static int
7625 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7626 {
7627         struct bnx2 *bp = netdev_priv(dev);
7628
7629         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7630                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7631                 return -EINVAL;
7632
7633         dev->mtu = new_mtu;
7634         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7635 }
7636
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: service each interrupt vector with its IRQ masked. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int idx;

	for (idx = 0; idx < bp->irq_nvecs; idx++) {
		int vector = bp->irq_tbl[idx].vector;

		disable_irq(vector);
		bnx2_interrupt(vector, &bp->bnx2_napi[idx]);
		enable_irq(vector);
	}
}
#endif
7651
7652 static void __devinit
7653 bnx2_get_5709_media(struct bnx2 *bp)
7654 {
7655         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7656         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7657         u32 strap;
7658
7659         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7660                 return;
7661         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7662                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7663                 return;
7664         }
7665
7666         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7667                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7668         else
7669                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7670
7671         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7672                 switch (strap) {
7673                 case 0x4:
7674                 case 0x5:
7675                 case 0x6:
7676                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7677                         return;
7678                 }
7679         } else {
7680                 switch (strap) {
7681                 case 0x1:
7682                 case 0x2:
7683                 case 0x4:
7684                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7685                         return;
7686                 }
7687         }
7688 }
7689
/* Classify the host bus from BNX2_PCICFG_MISC_STATUS: detect PCI-X vs.
 * conventional PCI, decode the detected clock into bp->bus_speed_mhz,
 * and record a 32-bit-wide bus in bp->flags.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: the exact speed comes from the clock-detect field. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI runs at either 66 or 33 MHz. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7741
/* Extract a vendor firmware version string from the VPD image in NVRAM
 * and place it at the start of bp->fw_version (terminated with a space
 * so later code can append the bootcode version).  Fails silently: on
 * any parse error bp->fw_version is left untouched.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* Upper half of the buffer holds the raw NVRAM read; the lower
	 * half receives the byte-swapped copy that is actually parsed.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* The VPD is stored with each 32-bit word byte-reversed; undo that. */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	/* The read-only section must fit entirely in the VPD we read. */
	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	/* Only accept VPD whose manufacturer ID is "1028" (Dell's PCI
	 * vendor ID) -- NOTE(review): apparently only those boards carry
	 * a version string here.
	 */
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7809
7810 static int __devinit
7811 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7812 {
7813         struct bnx2 *bp;
7814         unsigned long mem_len;
7815         int rc, i, j;
7816         u32 reg;
7817         u64 dma_mask, persist_dma_mask;
7818
7819         SET_NETDEV_DEV(dev, &pdev->dev);
7820         bp = netdev_priv(dev);
7821
7822         bp->flags = 0;
7823         bp->phy_flags = 0;
7824
7825         bp->temp_stats_blk =
7826                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7827
7828         if (bp->temp_stats_blk == NULL) {
7829                 rc = -ENOMEM;
7830                 goto err_out;
7831         }
7832
7833         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7834         rc = pci_enable_device(pdev);
7835         if (rc) {
7836                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7837                 goto err_out;
7838         }
7839
7840         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7841                 dev_err(&pdev->dev,
7842                         "Cannot find PCI device base address, aborting\n");
7843                 rc = -ENODEV;
7844                 goto err_out_disable;
7845         }
7846
7847         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7848         if (rc) {
7849                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7850                 goto err_out_disable;
7851         }
7852
7853         pci_set_master(pdev);
7854         pci_save_state(pdev);
7855
7856         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7857         if (bp->pm_cap == 0) {
7858                 dev_err(&pdev->dev,
7859                         "Cannot find power management capability, aborting\n");
7860                 rc = -EIO;
7861                 goto err_out_release;
7862         }
7863
7864         bp->dev = dev;
7865         bp->pdev = pdev;
7866
7867         spin_lock_init(&bp->phy_lock);
7868         spin_lock_init(&bp->indirect_lock);
7869 #ifdef BCM_CNIC
7870         mutex_init(&bp->cnic_lock);
7871 #endif
7872         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7873
7874         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7875         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7876         dev->mem_end = dev->mem_start + mem_len;
7877         dev->irq = pdev->irq;
7878
7879         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7880
7881         if (!bp->regview) {
7882                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7883                 rc = -ENOMEM;
7884                 goto err_out_release;
7885         }
7886
7887         /* Configure byte swap and enable write to the reg_window registers.
7888          * Rely on CPU to do target byte swapping on big endian systems
7889          * The chip's target access swapping will not swap all accesses
7890          */
7891         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7892                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7893                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7894
7895         bnx2_set_power_state(bp, PCI_D0);
7896
7897         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7898
7899         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7900                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7901                         dev_err(&pdev->dev,
7902                                 "Cannot find PCIE capability, aborting\n");
7903                         rc = -EIO;
7904                         goto err_out_unmap;
7905                 }
7906                 bp->flags |= BNX2_FLAG_PCIE;
7907                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7908                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7909         } else {
7910                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7911                 if (bp->pcix_cap == 0) {
7912                         dev_err(&pdev->dev,
7913                                 "Cannot find PCIX capability, aborting\n");
7914                         rc = -EIO;
7915                         goto err_out_unmap;
7916                 }
7917                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7918         }
7919
7920         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7921                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7922                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7923         }
7924
7925         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7926                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7927                         bp->flags |= BNX2_FLAG_MSI_CAP;
7928         }
7929
7930         /* 5708 cannot support DMA addresses > 40-bit.  */
7931         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7932                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7933         else
7934                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7935
7936         /* Configure DMA attributes. */
7937         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7938                 dev->features |= NETIF_F_HIGHDMA;
7939                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7940                 if (rc) {
7941                         dev_err(&pdev->dev,
7942                                 "pci_set_consistent_dma_mask failed, aborting\n");
7943                         goto err_out_unmap;
7944                 }
7945         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7946                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7947                 goto err_out_unmap;
7948         }
7949
7950         if (!(bp->flags & BNX2_FLAG_PCIE))
7951                 bnx2_get_pci_speed(bp);
7952
7953         /* 5706A0 may falsely detect SERR and PERR. */
7954         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7955                 reg = REG_RD(bp, PCI_COMMAND);
7956                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7957                 REG_WR(bp, PCI_COMMAND, reg);
7958         }
7959         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7960                 !(bp->flags & BNX2_FLAG_PCIX)) {
7961
7962                 dev_err(&pdev->dev,
7963                         "5706 A1 can only be used in a PCIX bus, aborting\n");
7964                 goto err_out_unmap;
7965         }
7966
7967         bnx2_init_nvram(bp);
7968
7969         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7970
7971         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7972             BNX2_SHM_HDR_SIGNATURE_SIG) {
7973                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7974
7975                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7976         } else
7977                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7978
7979         /* Get the permanent MAC address.  First we need to make sure the
7980          * firmware is actually running.
7981          */
7982         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7983
7984         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7985             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7986                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7987                 rc = -ENODEV;
7988                 goto err_out_unmap;
7989         }
7990
7991         bnx2_read_vpd_fw_ver(bp);
7992
7993         j = strlen(bp->fw_version);
7994         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7995         for (i = 0; i < 3 && j < 24; i++) {
7996                 u8 num, k, skip0;
7997
7998                 if (i == 0) {
7999                         bp->fw_version[j++] = 'b';
8000                         bp->fw_version[j++] = 'c';
8001                         bp->fw_version[j++] = ' ';
8002                 }
8003                 num = (u8) (reg >> (24 - (i * 8)));
8004                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8005                         if (num >= k || !skip0 || k == 1) {
8006                                 bp->fw_version[j++] = (num / k) + '0';
8007                                 skip0 = 0;
8008                         }
8009                 }
8010                 if (i != 2)
8011                         bp->fw_version[j++] = '.';
8012         }
8013         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8014         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8015                 bp->wol = 1;
8016
8017         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8018                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8019
8020                 for (i = 0; i < 30; i++) {
8021                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8022                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8023                                 break;
8024                         msleep(10);
8025                 }
8026         }
8027         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8028         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8029         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8030             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8031                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8032
8033                 if (j < 32)
8034                         bp->fw_version[j++] = ' ';
8035                 for (i = 0; i < 3 && j < 28; i++) {
8036                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8037                         reg = swab32(reg);
8038                         memcpy(&bp->fw_version[j], &reg, 4);
8039                         j += 4;
8040                 }
8041         }
8042
8043         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8044         bp->mac_addr[0] = (u8) (reg >> 8);
8045         bp->mac_addr[1] = (u8) reg;
8046
8047         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8048         bp->mac_addr[2] = (u8) (reg >> 24);
8049         bp->mac_addr[3] = (u8) (reg >> 16);
8050         bp->mac_addr[4] = (u8) (reg >> 8);
8051         bp->mac_addr[5] = (u8) reg;
8052
8053         bp->tx_ring_size = MAX_TX_DESC_CNT;
8054         bnx2_set_rx_ring_size(bp, 255);
8055
8056         bp->rx_csum = 1;
8057
8058         bp->tx_quick_cons_trip_int = 2;
8059         bp->tx_quick_cons_trip = 20;
8060         bp->tx_ticks_int = 18;
8061         bp->tx_ticks = 80;
8062
8063         bp->rx_quick_cons_trip_int = 2;
8064         bp->rx_quick_cons_trip = 12;
8065         bp->rx_ticks_int = 18;
8066         bp->rx_ticks = 18;
8067
8068         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8069
8070         bp->current_interval = BNX2_TIMER_INTERVAL;
8071
8072         bp->phy_addr = 1;
8073
8074         /* Disable WOL support if we are running on a SERDES chip. */
8075         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8076                 bnx2_get_5709_media(bp);
8077         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8078                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8079
8080         bp->phy_port = PORT_TP;
8081         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8082                 bp->phy_port = PORT_FIBRE;
8083                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8084                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8085                         bp->flags |= BNX2_FLAG_NO_WOL;
8086                         bp->wol = 0;
8087                 }
8088                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8089                         /* Don't do parallel detect on this board because of
8090                          * some board problems.  The link will not go down
8091                          * if we do parallel detect.
8092                          */
8093                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8094                             pdev->subsystem_device == 0x310c)
8095                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8096                 } else {
8097                         bp->phy_addr = 2;
8098                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8099                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8100                 }
8101         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8102                    CHIP_NUM(bp) == CHIP_NUM_5708)
8103                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8104         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8105                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8106                   CHIP_REV(bp) == CHIP_REV_Bx))
8107                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8108
8109         bnx2_init_fw_cap(bp);
8110
8111         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8112             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8113             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8114             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8115                 bp->flags |= BNX2_FLAG_NO_WOL;
8116                 bp->wol = 0;
8117         }
8118
8119         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8120                 bp->tx_quick_cons_trip_int =
8121                         bp->tx_quick_cons_trip;
8122                 bp->tx_ticks_int = bp->tx_ticks;
8123                 bp->rx_quick_cons_trip_int =
8124                         bp->rx_quick_cons_trip;
8125                 bp->rx_ticks_int = bp->rx_ticks;
8126                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8127                 bp->com_ticks_int = bp->com_ticks;
8128                 bp->cmd_ticks_int = bp->cmd_ticks;
8129         }
8130
8131         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8132          *
8133          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8134          * with byte enables disabled on the unused 32-bit word.  This is legal
8135          * but causes problems on the AMD 8132 which will eventually stop
8136          * responding after a while.
8137          *
8138          * AMD believes this incompatibility is unique to the 5706, and
8139          * prefers to locally disable MSI rather than globally disabling it.
8140          */
8141         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8142                 struct pci_dev *amd_8132 = NULL;
8143
8144                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8145                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8146                                                   amd_8132))) {
8147
8148                         if (amd_8132->revision >= 0x10 &&
8149                             amd_8132->revision <= 0x13) {
8150                                 disable_msi = 1;
8151                                 pci_dev_put(amd_8132);
8152                                 break;
8153                         }
8154                 }
8155         }
8156
8157         bnx2_set_default_link(bp);
8158         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8159
8160         init_timer(&bp->timer);
8161         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8162         bp->timer.data = (unsigned long) bp;
8163         bp->timer.function = bnx2_timer;
8164
8165         return 0;
8166
8167 err_out_unmap:
8168         if (bp->regview) {
8169                 iounmap(bp->regview);
8170                 bp->regview = NULL;
8171         }
8172
8173 err_out_release:
8174         pci_release_regions(pdev);
8175
8176 err_out_disable:
8177         pci_disable_device(pdev);
8178         pci_set_drvdata(pdev, NULL);
8179
8180 err_out:
8181         return rc;
8182 }
8183
8184 static char * __devinit
8185 bnx2_bus_string(struct bnx2 *bp, char *str)
8186 {
8187         char *s = str;
8188
8189         if (bp->flags & BNX2_FLAG_PCIE) {
8190                 s += sprintf(s, "PCI Express");
8191         } else {
8192                 s += sprintf(s, "PCI");
8193                 if (bp->flags & BNX2_FLAG_PCIX)
8194                         s += sprintf(s, "-X");
8195                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8196                         s += sprintf(s, " 32-bit");
8197                 else
8198                         s += sprintf(s, " 64-bit");
8199                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8200         }
8201         return str;
8202 }
8203
8204 static void __devinit
8205 bnx2_init_napi(struct bnx2 *bp)
8206 {
8207         int i;
8208
8209         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
8210                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8211                 int (*poll)(struct napi_struct *, int);
8212
8213                 if (i == 0)
8214                         poll = bnx2_poll;
8215                 else
8216                         poll = bnx2_poll_msix;
8217
8218                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8219                 bnapi->bp = bp;
8220         }
8221 }
8222
/* net_device callbacks for the bnx2 driver; installed on the netdev in
 * bnx2_init_one().
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8241
/* Propagate feature flags to vlan_features when VLAN support is built in;
 * a no-op otherwise.
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8248
8249 static int __devinit
8250 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8251 {
8252         static int version_printed = 0;
8253         struct net_device *dev = NULL;
8254         struct bnx2 *bp;
8255         int rc;
8256         char str[40];
8257
8258         if (version_printed++ == 0)
8259                 pr_info("%s", version);
8260
8261         /* dev zeroed in init_etherdev */
8262         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8263
8264         if (!dev)
8265                 return -ENOMEM;
8266
8267         rc = bnx2_init_board(pdev, dev);
8268         if (rc < 0) {
8269                 free_netdev(dev);
8270                 return rc;
8271         }
8272
8273         dev->netdev_ops = &bnx2_netdev_ops;
8274         dev->watchdog_timeo = TX_TIMEOUT;
8275         dev->ethtool_ops = &bnx2_ethtool_ops;
8276
8277         bp = netdev_priv(dev);
8278         bnx2_init_napi(bp);
8279
8280         pci_set_drvdata(pdev, dev);
8281
8282         rc = bnx2_request_firmware(bp);
8283         if (rc)
8284                 goto error;
8285
8286         memcpy(dev->dev_addr, bp->mac_addr, 6);
8287         memcpy(dev->perm_addr, bp->mac_addr, 6);
8288
8289         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8290         vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8291         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8292                 dev->features |= NETIF_F_IPV6_CSUM;
8293                 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8294         }
8295 #ifdef BCM_VLAN
8296         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8297 #endif
8298         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8299         vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8300         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8301                 dev->features |= NETIF_F_TSO6;
8302                 vlan_features_add(dev, NETIF_F_TSO6);
8303         }
8304         if ((rc = register_netdev(dev))) {
8305                 dev_err(&pdev->dev, "Cannot register net device\n");
8306                 goto error;
8307         }
8308
8309         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8310                     board_info[ent->driver_data].name,
8311                     ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8312                     ((CHIP_ID(bp) & 0x0ff0) >> 4),
8313                     bnx2_bus_string(bp, str),
8314                     dev->base_addr,
8315                     bp->pdev->irq, dev->dev_addr);
8316
8317         return 0;
8318
8319 error:
8320         if (bp->mips_firmware)
8321                 release_firmware(bp->mips_firmware);
8322         if (bp->rv2p_firmware)
8323                 release_firmware(bp->rv2p_firmware);
8324
8325         if (bp->regview)
8326                 iounmap(bp->regview);
8327         pci_release_regions(pdev);
8328         pci_disable_device(pdev);
8329         pci_set_drvdata(pdev, NULL);
8330         free_netdev(dev);
8331         return rc;
8332 }
8333
/* PCI remove entry point: undo bnx2_init_one() in reverse order. */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no queued reset_task work is still in flight. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8359
/* Legacy PM suspend callback.  Quiesces the interface and puts the
 * chip into the PCI power state chosen for @state.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Drain deferred work before stopping the interface so no
	 * reset task runs against a quiesced chip.
	 */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8383
/* Legacy PM resume callback.  Restores PCI config space, powers the
 * chip back to D0 and restarts the interface if it was running at
 * suspend time.  Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
8400
8401 /**
8402  * bnx2_io_error_detected - called when PCI error is detected
8403  * @pdev: Pointer to PCI device
8404  * @state: The current pci connection state
8405  *
8406  * This function is called after a PCI bus error affecting
8407  * this device has been detected.
8408  */
8409 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8410                                                pci_channel_state_t state)
8411 {
8412         struct net_device *dev = pci_get_drvdata(pdev);
8413         struct bnx2 *bp = netdev_priv(dev);
8414
8415         rtnl_lock();
8416         netif_device_detach(dev);
8417
8418         if (state == pci_channel_io_perm_failure) {
8419                 rtnl_unlock();
8420                 return PCI_ERS_RESULT_DISCONNECT;
8421         }
8422
8423         if (netif_running(dev)) {
8424                 bnx2_netif_stop(bp);
8425                 del_timer_sync(&bp->timer);
8426                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8427         }
8428
8429         pci_disable_device(pdev);
8430         rtnl_unlock();
8431
8432         /* Request a slot slot reset. */
8433         return PCI_ERS_RESULT_NEED_RESET;
8434 }
8435
8436 /**
8437  * bnx2_io_slot_reset - called after the pci bus has been reset.
8438  * @pdev: Pointer to PCI device
8439  *
8440  * Restart the card from scratch, as if from a cold-boot.
8441  */
8442 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8443 {
8444         struct net_device *dev = pci_get_drvdata(pdev);
8445         struct bnx2 *bp = netdev_priv(dev);
8446
8447         rtnl_lock();
8448         if (pci_enable_device(pdev)) {
8449                 dev_err(&pdev->dev,
8450                         "Cannot re-enable PCI device after reset\n");
8451                 rtnl_unlock();
8452                 return PCI_ERS_RESULT_DISCONNECT;
8453         }
8454         pci_set_master(pdev);
8455         pci_restore_state(pdev);
8456         pci_save_state(pdev);
8457
8458         if (netif_running(dev)) {
8459                 bnx2_set_power_state(bp, PCI_D0);
8460                 bnx2_init_nic(bp, 1);
8461         }
8462
8463         rtnl_unlock();
8464         return PCI_ERS_RESULT_RECOVERED;
8465 }
8466
8467 /**
8468  * bnx2_io_resume - called when traffic can start flowing again.
8469  * @pdev: Pointer to PCI device
8470  *
8471  * This callback is called when the error recovery driver tells us that
8472  * its OK to resume normal operation.
8473  */
8474 static void bnx2_io_resume(struct pci_dev *pdev)
8475 {
8476         struct net_device *dev = pci_get_drvdata(pdev);
8477         struct bnx2 *bp = netdev_priv(dev);
8478
8479         rtnl_lock();
8480         if (netif_running(dev))
8481                 bnx2_netif_start(bp);
8482
8483         netif_device_attach(dev);
8484         rtnl_unlock();
8485 }
8486
/* PCI Advanced Error Reporting (AER) recovery callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8492
/* PCI driver descriptor tying probe/remove/PM/AER callbacks together. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8502
8503 static int __init bnx2_init(void)
8504 {
8505         return pci_register_driver(&bnx2_pci_driver);
8506 }
8507
8508 static void __exit bnx2_cleanup(void)
8509 {
8510         pci_unregister_driver(&bnx2_pci_driver);
8511 }
8512
/* Hook module load/unload to the driver's init and cleanup routines. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8515
8516
8517