ath9k: Enable MIB and TIM interrupts for station mode.
[pandora-kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50 #include <linux/log2.h>
51
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54 #include "bnx2_fw2.h"
55
/* Size of the buffer used to hold decompressed firmware images. */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.9.0"
#define DRV_MODULE_RELDATE      "Dec 16, 2008"

/* Convert a relative delay into an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: a nonzero value disables MSI (see MODULE_PARM_DESC). */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Supported board types.  These values appear as driver_data in
 * bnx2_pci_tbl below and index the board_info[] name table, so the
 * three definitions must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
94
/* Human-readable board names, indexed by board_t above — keep the
 * entries in exactly the enum's order.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
111
/* PCI IDs handled by this driver.  The final field (driver_data) is
 * the board_t value for the matched device.  The entries with specific
 * HP subsystem IDs come before the PCI_ANY_ID wildcard entries for the
 * same device ID because the PCI core matches in table order.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S had no PCI_DEVICE_ID_NX2_* constant yet — raw IDs. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
137
/* NVRAM device table for pre-5709 chips.  The positional initializers
 * must match the field order of struct flash_spec in bnx2.h; the first
 * five words are raw controller configuration values for each device.
 * NOTE(review): the per-entry hex values come from the hardware
 * strapping/config documentation — do not reorder or edit by hand.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
226
/* Fixed buffered-flash layout for the 5709 family. */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
237
238 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
239 {
240         u32 diff;
241
242         smp_mb();
243
244         /* The ring uses 256 indices for 255 entries, one of them
245          * needs to be skipped.
246          */
247         diff = txr->tx_prod - txr->tx_cons;
248         if (unlikely(diff >= TX_DESC_CNT)) {
249                 diff &= 0xffff;
250                 if (diff == TX_DESC_CNT)
251                         diff = MAX_TX_DESC_CNT;
252         }
253         return (bp->tx_ring_size - diff);
254 }
255
256 static u32
257 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
258 {
259         u32 val;
260
261         spin_lock_bh(&bp->indirect_lock);
262         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
263         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
264         spin_unlock_bh(&bp->indirect_lock);
265         return val;
266 }
267
268 static void
269 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
270 {
271         spin_lock_bh(&bp->indirect_lock);
272         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
273         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
274         spin_unlock_bh(&bp->indirect_lock);
275 }
276
277 static void
278 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
279 {
280         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
281 }
282
283 static u32
284 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
285 {
286         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
287 }
288
289 static void
290 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
291 {
292         offset += cid_addr;
293         spin_lock_bh(&bp->indirect_lock);
294         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
295                 int i;
296
297                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
298                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
299                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
300                 for (i = 0; i < 5; i++) {
301                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
302                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
303                                 break;
304                         udelay(5);
305                 }
306         } else {
307                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
308                 REG_WR(bp, BNX2_CTX_DATA, val);
309         }
310         spin_unlock_bh(&bp->indirect_lock);
311 }
312
313 static int
314 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
315 {
316         u32 val1;
317         int i, ret;
318
319         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
320                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
321                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
322
323                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
324                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
325
326                 udelay(40);
327         }
328
329         val1 = (bp->phy_addr << 21) | (reg << 16) |
330                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
331                 BNX2_EMAC_MDIO_COMM_START_BUSY;
332         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
333
334         for (i = 0; i < 50; i++) {
335                 udelay(10);
336
337                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
338                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
339                         udelay(5);
340
341                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
342                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
343
344                         break;
345                 }
346         }
347
348         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
349                 *val = 0x0;
350                 ret = -EBUSY;
351         }
352         else {
353                 *val = val1;
354                 ret = 0;
355         }
356
357         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
358                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
359                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360
361                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
362                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
363
364                 udelay(40);
365         }
366
367         return ret;
368 }
369
370 static int
371 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
372 {
373         u32 val1;
374         int i, ret;
375
376         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
377                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
378                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
379
380                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
381                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
382
383                 udelay(40);
384         }
385
386         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
387                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
388                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
389         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
390
391         for (i = 0; i < 50; i++) {
392                 udelay(10);
393
394                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
395                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
396                         udelay(5);
397                         break;
398                 }
399         }
400
401         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
402                 ret = -EBUSY;
403         else
404                 ret = 0;
405
406         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
407                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
408                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
409
410                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
411                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
412
413                 udelay(40);
414         }
415
416         return ret;
417 }
418
419 static void
420 bnx2_disable_int(struct bnx2 *bp)
421 {
422         int i;
423         struct bnx2_napi *bnapi;
424
425         for (i = 0; i < bp->irq_nvecs; i++) {
426                 bnapi = &bp->bnx2_napi[i];
427                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
429         }
430         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
431 }
432
/* Re-enable interrupts on all vectors. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		/* Ack the last seen status index first with MASK_INT still
		 * set, then again with the mask cleared.
		 * NOTE(review): the two-step sequence appears to be a
		 * hardware ordering requirement — confirm against the chip
		 * documentation before changing it.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	/* Kick the host coalescing block so events that occurred while
	 * interrupts were disabled are delivered now.
	 */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
453
454 static void
455 bnx2_disable_int_sync(struct bnx2 *bp)
456 {
457         int i;
458
459         atomic_inc(&bp->intr_sem);
460         bnx2_disable_int(bp);
461         for (i = 0; i < bp->irq_nvecs; i++)
462                 synchronize_irq(bp->irq_tbl[i].vector);
463 }
464
465 static void
466 bnx2_napi_disable(struct bnx2 *bp)
467 {
468         int i;
469
470         for (i = 0; i < bp->irq_nvecs; i++)
471                 napi_disable(&bp->bnx2_napi[i].napi);
472 }
473
474 static void
475 bnx2_napi_enable(struct bnx2 *bp)
476 {
477         int i;
478
479         for (i = 0; i < bp->irq_nvecs; i++)
480                 napi_enable(&bp->bnx2_napi[i].napi);
481 }
482
483 static void
484 bnx2_netif_stop(struct bnx2 *bp)
485 {
486         bnx2_disable_int_sync(bp);
487         if (netif_running(bp->dev)) {
488                 bnx2_napi_disable(bp);
489                 netif_tx_disable(bp->dev);
490                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
491         }
492 }
493
494 static void
495 bnx2_netif_start(struct bnx2 *bp)
496 {
497         if (atomic_dec_and_test(&bp->intr_sem)) {
498                 if (netif_running(bp->dev)) {
499                         netif_tx_wake_all_queues(bp->dev);
500                         bnx2_napi_enable(bp);
501                         bnx2_enable_int(bp);
502                 }
503         }
504 }
505
506 static void
507 bnx2_free_tx_mem(struct bnx2 *bp)
508 {
509         int i;
510
511         for (i = 0; i < bp->num_tx_rings; i++) {
512                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
513                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
514
515                 if (txr->tx_desc_ring) {
516                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
517                                             txr->tx_desc_ring,
518                                             txr->tx_desc_mapping);
519                         txr->tx_desc_ring = NULL;
520                 }
521                 kfree(txr->tx_buf_ring);
522                 txr->tx_buf_ring = NULL;
523         }
524 }
525
526 static void
527 bnx2_free_rx_mem(struct bnx2 *bp)
528 {
529         int i;
530
531         for (i = 0; i < bp->num_rx_rings; i++) {
532                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
533                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
534                 int j;
535
536                 for (j = 0; j < bp->rx_max_ring; j++) {
537                         if (rxr->rx_desc_ring[j])
538                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
539                                                     rxr->rx_desc_ring[j],
540                                                     rxr->rx_desc_mapping[j]);
541                         rxr->rx_desc_ring[j] = NULL;
542                 }
543                 if (rxr->rx_buf_ring)
544                         vfree(rxr->rx_buf_ring);
545                 rxr->rx_buf_ring = NULL;
546
547                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
548                         if (rxr->rx_pg_desc_ring[j])
549                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
550                                                     rxr->rx_pg_desc_ring[j],
551                                                     rxr->rx_pg_desc_mapping[j]);
552                         rxr->rx_pg_desc_ring[j] = NULL;
553                 }
554                 if (rxr->rx_pg_ring)
555                         vfree(rxr->rx_pg_ring);
556                 rxr->rx_pg_ring = NULL;
557         }
558 }
559
560 static int
561 bnx2_alloc_tx_mem(struct bnx2 *bp)
562 {
563         int i;
564
565         for (i = 0; i < bp->num_tx_rings; i++) {
566                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
567                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
568
569                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
570                 if (txr->tx_buf_ring == NULL)
571                         return -ENOMEM;
572
573                 txr->tx_desc_ring =
574                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
575                                              &txr->tx_desc_mapping);
576                 if (txr->tx_desc_ring == NULL)
577                         return -ENOMEM;
578         }
579         return 0;
580 }
581
582 static int
583 bnx2_alloc_rx_mem(struct bnx2 *bp)
584 {
585         int i;
586
587         for (i = 0; i < bp->num_rx_rings; i++) {
588                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
589                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
590                 int j;
591
592                 rxr->rx_buf_ring =
593                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
594                 if (rxr->rx_buf_ring == NULL)
595                         return -ENOMEM;
596
597                 memset(rxr->rx_buf_ring, 0,
598                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
599
600                 for (j = 0; j < bp->rx_max_ring; j++) {
601                         rxr->rx_desc_ring[j] =
602                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
603                                                      &rxr->rx_desc_mapping[j]);
604                         if (rxr->rx_desc_ring[j] == NULL)
605                                 return -ENOMEM;
606
607                 }
608
609                 if (bp->rx_pg_ring_size) {
610                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
611                                                   bp->rx_max_pg_ring);
612                         if (rxr->rx_pg_ring == NULL)
613                                 return -ENOMEM;
614
615                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
616                                bp->rx_max_pg_ring);
617                 }
618
619                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
620                         rxr->rx_pg_desc_ring[j] =
621                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
622                                                 &rxr->rx_pg_desc_mapping[j]);
623                         if (rxr->rx_pg_desc_ring[j] == NULL)
624                                 return -ENOMEM;
625
626                 }
627         }
628         return 0;
629 }
630
631 static void
632 bnx2_free_mem(struct bnx2 *bp)
633 {
634         int i;
635         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
636
637         bnx2_free_tx_mem(bp);
638         bnx2_free_rx_mem(bp);
639
640         for (i = 0; i < bp->ctx_pages; i++) {
641                 if (bp->ctx_blk[i]) {
642                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
643                                             bp->ctx_blk[i],
644                                             bp->ctx_blk_mapping[i]);
645                         bp->ctx_blk[i] = NULL;
646                 }
647         }
648         if (bnapi->status_blk.msi) {
649                 pci_free_consistent(bp->pdev, bp->status_stats_size,
650                                     bnapi->status_blk.msi,
651                                     bp->status_blk_mapping);
652                 bnapi->status_blk.msi = NULL;
653                 bp->stats_blk = NULL;
654         }
655 }
656
/* Allocate all DMA-coherent memory shared with the chip: the combined
 * status + statistics block, the 5709 host context pages, and the
 * rx/tx ring memory.  Returns 0 or -ENOMEM; on failure everything
 * already allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status-block slot per MSI-X hardware vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each extra vector gets its own slot inside the
			 * combined status-block allocation.
			 */
			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector field for BNX2_PCICFG_INT_ACK_CMD writes
			 * (see bnx2_enable_int()).
			 */
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 keeps 8KB of context memory in host pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
733
734 static void
735 bnx2_report_fw_link(struct bnx2 *bp)
736 {
737         u32 fw_link_status = 0;
738
739         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
740                 return;
741
742         if (bp->link_up) {
743                 u32 bmsr;
744
745                 switch (bp->line_speed) {
746                 case SPEED_10:
747                         if (bp->duplex == DUPLEX_HALF)
748                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
749                         else
750                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
751                         break;
752                 case SPEED_100:
753                         if (bp->duplex == DUPLEX_HALF)
754                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
755                         else
756                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
757                         break;
758                 case SPEED_1000:
759                         if (bp->duplex == DUPLEX_HALF)
760                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
761                         else
762                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
763                         break;
764                 case SPEED_2500:
765                         if (bp->duplex == DUPLEX_HALF)
766                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
767                         else
768                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
769                         break;
770                 }
771
772                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
773
774                 if (bp->autoneg) {
775                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
776
777                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
778                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
779
780                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
781                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
782                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
783                         else
784                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
785                 }
786         }
787         else
788                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
789
790         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
791 }
792
793 static char *
794 bnx2_xceiver_str(struct bnx2 *bp)
795 {
796         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
797                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
798                  "Copper"));
799 }
800
/* Update the carrier state, log the new link state, and forward it to
 * the bootcode via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* The log line is assembled from several printk()s; only
		 * the first carries the log level.
		 */
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				/* Both directions may be active at once. */
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
837
/* Derive bp->flow_ctrl (TX/RX pause enables) from the negotiated or
 * administratively requested settings.  Pause is only resolved on
 * full-duplex links.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If speed or flow-control autoneg is disabled, use the requested
	 * setting directly (full duplex only).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * in its 1000X status register, so no table lookup is needed.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map the 1000BASE-X pause advertisement bits onto the copper
	 * encoding so the single resolution table below handles both.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
913
914 static int
915 bnx2_5709s_linkup(struct bnx2 *bp)
916 {
917         u32 val, speed;
918
919         bp->link_up = 1;
920
921         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
922         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
923         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
924
925         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
926                 bp->line_speed = bp->req_line_speed;
927                 bp->duplex = bp->req_duplex;
928                 return 0;
929         }
930         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
931         switch (speed) {
932                 case MII_BNX2_GP_TOP_AN_SPEED_10:
933                         bp->line_speed = SPEED_10;
934                         break;
935                 case MII_BNX2_GP_TOP_AN_SPEED_100:
936                         bp->line_speed = SPEED_100;
937                         break;
938                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
939                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
940                         bp->line_speed = SPEED_1000;
941                         break;
942                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
943                         bp->line_speed = SPEED_2500;
944                         break;
945         }
946         if (val & MII_BNX2_GP_TOP_AN_FD)
947                 bp->duplex = DUPLEX_FULL;
948         else
949                 bp->duplex = DUPLEX_HALF;
950         return 0;
951 }
952
953 static int
954 bnx2_5708s_linkup(struct bnx2 *bp)
955 {
956         u32 val;
957
958         bp->link_up = 1;
959         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
960         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
961                 case BCM5708S_1000X_STAT1_SPEED_10:
962                         bp->line_speed = SPEED_10;
963                         break;
964                 case BCM5708S_1000X_STAT1_SPEED_100:
965                         bp->line_speed = SPEED_100;
966                         break;
967                 case BCM5708S_1000X_STAT1_SPEED_1G:
968                         bp->line_speed = SPEED_1000;
969                         break;
970                 case BCM5708S_1000X_STAT1_SPEED_2G5:
971                         bp->line_speed = SPEED_2500;
972                         break;
973         }
974         if (val & BCM5708S_1000X_STAT1_FD)
975                 bp->duplex = DUPLEX_FULL;
976         else
977                 bp->duplex = DUPLEX_HALF;
978
979         return 0;
980 }
981
982 static int
983 bnx2_5706s_linkup(struct bnx2 *bp)
984 {
985         u32 bmcr, local_adv, remote_adv, common;
986
987         bp->link_up = 1;
988         bp->line_speed = SPEED_1000;
989
990         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
991         if (bmcr & BMCR_FULLDPLX) {
992                 bp->duplex = DUPLEX_FULL;
993         }
994         else {
995                 bp->duplex = DUPLEX_HALF;
996         }
997
998         if (!(bmcr & BMCR_ANENABLE)) {
999                 return 0;
1000         }
1001
1002         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1003         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1004
1005         common = local_adv & remote_adv;
1006         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1007
1008                 if (common & ADVERTISE_1000XFULL) {
1009                         bp->duplex = DUPLEX_FULL;
1010                 }
1011                 else {
1012                         bp->duplex = DUPLEX_HALF;
1013                 }
1014         }
1015
1016         return 0;
1017 }
1018
1019 static int
1020 bnx2_copper_linkup(struct bnx2 *bp)
1021 {
1022         u32 bmcr;
1023
1024         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1025         if (bmcr & BMCR_ANENABLE) {
1026                 u32 local_adv, remote_adv, common;
1027
1028                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1029                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1030
1031                 common = local_adv & (remote_adv >> 2);
1032                 if (common & ADVERTISE_1000FULL) {
1033                         bp->line_speed = SPEED_1000;
1034                         bp->duplex = DUPLEX_FULL;
1035                 }
1036                 else if (common & ADVERTISE_1000HALF) {
1037                         bp->line_speed = SPEED_1000;
1038                         bp->duplex = DUPLEX_HALF;
1039                 }
1040                 else {
1041                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1042                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1043
1044                         common = local_adv & remote_adv;
1045                         if (common & ADVERTISE_100FULL) {
1046                                 bp->line_speed = SPEED_100;
1047                                 bp->duplex = DUPLEX_FULL;
1048                         }
1049                         else if (common & ADVERTISE_100HALF) {
1050                                 bp->line_speed = SPEED_100;
1051                                 bp->duplex = DUPLEX_HALF;
1052                         }
1053                         else if (common & ADVERTISE_10FULL) {
1054                                 bp->line_speed = SPEED_10;
1055                                 bp->duplex = DUPLEX_FULL;
1056                         }
1057                         else if (common & ADVERTISE_10HALF) {
1058                                 bp->line_speed = SPEED_10;
1059                                 bp->duplex = DUPLEX_HALF;
1060                         }
1061                         else {
1062                                 bp->line_speed = 0;
1063                                 bp->link_up = 0;
1064                         }
1065                 }
1066         }
1067         else {
1068                 if (bmcr & BMCR_SPEED100) {
1069                         bp->line_speed = SPEED_100;
1070                 }
1071                 else {
1072                         bp->line_speed = SPEED_10;
1073                 }
1074                 if (bmcr & BMCR_FULLDPLX) {
1075                         bp->duplex = DUPLEX_FULL;
1076                 }
1077                 else {
1078                         bp->duplex = DUPLEX_HALF;
1079                 }
1080         }
1081
1082         return 0;
1083 }
1084
/* Program the L2 context for one rx ring.  On the 5709 this also sets
 * the flow-control watermarks that control pause-frame generation.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 is an undocumented context-type field
	 * value inherited from the original driver.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark only matters when tx pause is enabled. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Degenerate ring sizes: disable the low watermark rather
		 * than let the thresholds invert.
		 */
		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is clamped to a 4-bit field; a zero high
		 * watermark also forces the low one off.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1120
1121 static void
1122 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1123 {
1124         int i;
1125         u32 cid;
1126
1127         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1128                 if (i == 1)
1129                         cid = RX_RSS_CID;
1130                 bnx2_init_rx_context(bp, cid);
1131         }
1132 }
1133
/* Program the EMAC to match the current link state: port mode for the
 * negotiated speed, duplex, and rx/tx pause enables; then acknowledge
 * the link-change interrupt.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff are magic TX_LENGTHS (slot time/
	 * IPG) values inherited from the original driver; the second is
	 * used only for 1G half duplex.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; it uses
				 * the plain MII setting below instead.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 rx contexts embed flow-control watermarks derived from
	 * bp->flow_ctrl, so reprogram them after a link change.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1201
1202 static void
1203 bnx2_enable_bmsr1(struct bnx2 *bp)
1204 {
1205         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1206             (CHIP_NUM(bp) == CHIP_NUM_5709))
1207                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1208                                MII_BNX2_BLK_ADDR_GP_STATUS);
1209 }
1210
1211 static void
1212 bnx2_disable_bmsr1(struct bnx2 *bp)
1213 {
1214         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1215             (CHIP_NUM(bp) == CHIP_NUM_5709))
1216                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1217                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1218 }
1219
1220 static int
1221 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1222 {
1223         u32 up1;
1224         int ret = 1;
1225
1226         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1227                 return 0;
1228
1229         if (bp->autoneg & AUTONEG_SPEED)
1230                 bp->advertising |= ADVERTISED_2500baseX_Full;
1231
1232         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1233                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1234
1235         bnx2_read_phy(bp, bp->mii_up1, &up1);
1236         if (!(up1 & BCM5708S_UP1_2G5)) {
1237                 up1 |= BCM5708S_UP1_2G5;
1238                 bnx2_write_phy(bp, bp->mii_up1, up1);
1239                 ret = 0;
1240         }
1241
1242         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1243                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1244                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1245
1246         return ret;
1247 }
1248
1249 static int
1250 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1251 {
1252         u32 up1;
1253         int ret = 0;
1254
1255         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1256                 return 0;
1257
1258         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1259                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1260
1261         bnx2_read_phy(bp, bp->mii_up1, &up1);
1262         if (up1 & BCM5708S_UP1_2G5) {
1263                 up1 &= ~BCM5708S_UP1_2G5;
1264                 bnx2_write_phy(bp, bp->mii_up1, up1);
1265                 ret = 1;
1266         }
1267
1268         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1269                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1270                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1271
1272         return ret;
1273 }
1274
1275 static void
1276 bnx2_enable_forced_2g5(struct bnx2 *bp)
1277 {
1278         u32 bmcr;
1279
1280         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1281                 return;
1282
1283         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1284                 u32 val;
1285
1286                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1287                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1288                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1289                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1290                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1291                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1292
1293                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1294                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1295                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1296
1297         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1298                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1299                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1300         }
1301
1302         if (bp->autoneg & AUTONEG_SPEED) {
1303                 bmcr &= ~BMCR_ANENABLE;
1304                 if (bp->req_duplex == DUPLEX_FULL)
1305                         bmcr |= BMCR_FULLDPLX;
1306         }
1307         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1308 }
1309
1310 static void
1311 bnx2_disable_forced_2g5(struct bnx2 *bp)
1312 {
1313         u32 bmcr;
1314
1315         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1316                 return;
1317
1318         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1319                 u32 val;
1320
1321                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1323                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1324                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1325                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1326
1327                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1328                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1329                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1330
1331         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1332                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1333                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1334         }
1335
1336         if (bp->autoneg & AUTONEG_SPEED)
1337                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1338         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1339 }
1340
1341 static void
1342 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1343 {
1344         u32 val;
1345
1346         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1347         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1348         if (start)
1349                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1350         else
1351                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1352 }
1353
/* Re-evaluate the link state from the PHY and reprogram the MAC.
 * Reads BMSR twice (link status is latched, per MII convention),
 * decodes speed/duplex via the chip-specific linkup helper, resolves
 * flow control, and logs on state change.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC/PHY loopback the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY configurations are not managed by this function. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* bp->mii_bmsr1 may live in a shadowed block on 5709 SerDes. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: qualify the BMSR link bit with the EMAC status
	 * and the autoneg debug shadow register (also read twice).
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Decode speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: undo any forced 2.5G setting and re-enable
		 * autoneg if a parallel-detected link went away.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log and notify when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1437
1438 static int
1439 bnx2_reset_phy(struct bnx2 *bp)
1440 {
1441         int i;
1442         u32 reg;
1443
1444         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1445
1446 #define PHY_RESET_MAX_WAIT 100
1447         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1448                 udelay(10);
1449
1450                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1451                 if (!(reg & BMCR_RESET)) {
1452                         udelay(20);
1453                         break;
1454                 }
1455         }
1456         if (i == PHY_RESET_MAX_WAIT) {
1457                 return -EBUSY;
1458         }
1459         return 0;
1460 }
1461
1462 static u32
1463 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1464 {
1465         u32 adv = 0;
1466
1467         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1468                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1469
1470                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1471                         adv = ADVERTISE_1000XPAUSE;
1472                 }
1473                 else {
1474                         adv = ADVERTISE_PAUSE_CAP;
1475                 }
1476         }
1477         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1478                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1479                         adv = ADVERTISE_1000XPSE_ASYM;
1480                 }
1481                 else {
1482                         adv = ADVERTISE_PAUSE_ASYM;
1483                 }
1484         }
1485         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1486                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1487                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1488                 }
1489                 else {
1490                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1491                 }
1492         }
1493         return adv;
1494 }
1495
1496 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1497
/* Hand link configuration to the management firmware (remote PHY).
 * Encodes the requested autoneg/speed/duplex and pause advertisement
 * into a BNX2_NETLINK_SET_LINK word, writes it to shared memory, and
 * issues a SET_LINK firmware command.  Drops bp->phy_lock around the
 * firmware sync (see the sparse annotations).  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Fold the pause advertisement into the firmware word; either
	 * the SerDes or copper encoding may be present at this point.
	 */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* phy_lock is released for the duration of the firmware sync. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1556
1557 static int
1558 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1559 __releases(&bp->phy_lock)
1560 __acquires(&bp->phy_lock)
1561 {
1562         u32 adv, bmcr;
1563         u32 new_adv = 0;
1564
1565         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1566                 return (bnx2_setup_remote_phy(bp, port));
1567
1568         if (!(bp->autoneg & AUTONEG_SPEED)) {
1569                 u32 new_bmcr;
1570                 int force_link_down = 0;
1571
1572                 if (bp->req_line_speed == SPEED_2500) {
1573                         if (!bnx2_test_and_enable_2g5(bp))
1574                                 force_link_down = 1;
1575                 } else if (bp->req_line_speed == SPEED_1000) {
1576                         if (bnx2_test_and_disable_2g5(bp))
1577                                 force_link_down = 1;
1578                 }
1579                 bnx2_read_phy(bp, bp->mii_adv, &adv);
1580                 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1581
1582                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1583                 new_bmcr = bmcr & ~BMCR_ANENABLE;
1584                 new_bmcr |= BMCR_SPEED1000;
1585
1586                 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1587                         if (bp->req_line_speed == SPEED_2500)
1588                                 bnx2_enable_forced_2g5(bp);
1589                         else if (bp->req_line_speed == SPEED_1000) {
1590                                 bnx2_disable_forced_2g5(bp);
1591                                 new_bmcr &= ~0x2000;
1592                         }
1593
1594                 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1595                         if (bp->req_line_speed == SPEED_2500)
1596                                 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1597                         else
1598                                 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1599                 }
1600
1601                 if (bp->req_duplex == DUPLEX_FULL) {
1602                         adv |= ADVERTISE_1000XFULL;
1603                         new_bmcr |= BMCR_FULLDPLX;
1604                 }
1605                 else {
1606                         adv |= ADVERTISE_1000XHALF;
1607                         new_bmcr &= ~BMCR_FULLDPLX;
1608                 }
1609                 if ((new_bmcr != bmcr) || (force_link_down)) {
1610                         /* Force a link down visible on the other side */
1611                         if (bp->link_up) {
1612                                 bnx2_write_phy(bp, bp->mii_adv, adv &
1613                                                ~(ADVERTISE_1000XFULL |
1614                                                  ADVERTISE_1000XHALF));
1615                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1616                                         BMCR_ANRESTART | BMCR_ANENABLE);
1617
1618                                 bp->link_up = 0;
1619                                 netif_carrier_off(bp->dev);
1620                                 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1621                                 bnx2_report_link(bp);
1622                         }
1623                         bnx2_write_phy(bp, bp->mii_adv, adv);
1624                         bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1625                 } else {
1626                         bnx2_resolve_flow_ctrl(bp);
1627                         bnx2_set_mac_link(bp);
1628                 }
1629                 return 0;
1630         }
1631
1632         bnx2_test_and_enable_2g5(bp);
1633
1634         if (bp->advertising & ADVERTISED_1000baseT_Full)
1635                 new_adv |= ADVERTISE_1000XFULL;
1636
1637         new_adv |= bnx2_phy_get_pause_adv(bp);
1638
1639         bnx2_read_phy(bp, bp->mii_adv, &adv);
1640         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1641
1642         bp->serdes_an_pending = 0;
1643         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1644                 /* Force a link down visible on the other side */
1645                 if (bp->link_up) {
1646                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1647                         spin_unlock_bh(&bp->phy_lock);
1648                         msleep(20);
1649                         spin_lock_bh(&bp->phy_lock);
1650                 }
1651
1652                 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1653                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1654                         BMCR_ANENABLE);
1655                 /* Speed up link-up time when the link partner
1656                  * does not autonegotiate which is very common
1657                  * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
1659                  * to minimize link disruptions. Autoneg. involves
1660                  * exchanging base pages plus 3 next pages and
1661                  * normally completes in about 120 msec.
1662                  */
1663                 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1664                 bp->serdes_an_pending = 1;
1665                 mod_timer(&bp->timer, jiffies + bp->current_interval);
1666         } else {
1667                 bnx2_resolve_flow_ctrl(bp);
1668                 bnx2_set_mac_link(bp);
1669         }
1670
1671         return 0;
1672 }
1673
/* Fibre advertisement mask for ethtool; includes 2.5G only when the
 * PHY reports 2.5G capability.  Expands in a context where "bp" is in
 * scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Copper advertisement mask for ethtool: 10/100 half+full, 1000 full. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII_ADVERTISE bits for every 10/100 mode plus the 802.3 CSMA selector. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII_CTRL1000 bits for both 1000Mb duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1688
/* Load the driver's requested link settings (autoneg/advertising or a
 * forced speed/duplex) from the remote-PHY defaults that the firmware
 * publishes in shared memory for the current port type.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
        u32 link;

        /* The firmware keeps separate default-link words for copper
         * and SerDes media.
         */
        if (bp->phy_port == PORT_TP)
                link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
        else
                link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
                /* Autoneg requested: translate each firmware speed bit
                 * into the corresponding ethtool advertising bit.
                 */
                bp->req_line_speed = 0;
                bp->autoneg |= AUTONEG_SPEED;
                bp->advertising = ADVERTISED_Autoneg;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                        bp->advertising |= ADVERTISED_10baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
                        bp->advertising |= ADVERTISED_10baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                        bp->advertising |= ADVERTISED_100baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
                        bp->advertising |= ADVERTISED_100baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->advertising |= ADVERTISED_1000baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->advertising |= ADVERTISED_2500baseX_Full;
        } else {
                /* Forced mode: the highest firmware speed bit set wins;
                 * half duplex is only selectable at 10/100.
                 */
                bp->autoneg = 0;
                bp->advertising = 0;
                bp->req_duplex = DUPLEX_FULL;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
                        bp->req_line_speed = SPEED_10;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
                        bp->req_line_speed = SPEED_100;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->req_line_speed = SPEED_1000;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->req_line_speed = SPEED_2500;
        }
}
1735
/* Establish the default link configuration.  Firmware-managed (remote)
 * PHYs take their defaults from shared memory; otherwise default to
 * full autonegotiation, with an optional forced-1G override read from
 * the port hardware config on SerDes ports.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                bnx2_set_default_remote_link(bp);
                return;
        }

        bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
        bp->req_line_speed = 0;
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                u32 reg;

                bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

                /* NVRAM may force the default fibre link to 1G full
                 * instead of autoneg.
                 */
                reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
                reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
                if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
                        bp->autoneg = 0;
                        bp->req_line_speed = bp->line_speed = SPEED_1000;
                        bp->req_duplex = DUPLEX_FULL;
                }
        } else
                bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1761
1762 static void
1763 bnx2_send_heart_beat(struct bnx2 *bp)
1764 {
1765         u32 msg;
1766         u32 addr;
1767
1768         spin_lock(&bp->indirect_lock);
1769         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1770         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1771         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1772         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1773         spin_unlock(&bp->indirect_lock);
1774 }
1775
/* Handle a link-status event from a firmware-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into the driver's
 * link state (speed, duplex, flow control, port type), reports a link
 * change if one occurred, and reprograms the MAC accordingly.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* remember old state for change detection */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* Firmware piggybacks a heartbeat-expired request on the status word. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxHALF case sets half duplex and then falls
                 * through to the matching speed assignment.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        /* Flow control not fully autonegotiated: use the
                         * requested setting (full duplex only).
                         */
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        /* Take the negotiated result from the firmware. */
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* The remote PHY may switch media; re-derive defaults if
                 * the port type changed.
                 */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1852
1853 static int
1854 bnx2_set_remote_link(struct bnx2 *bp)
1855 {
1856         u32 evt_code;
1857
1858         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1859         switch (evt_code) {
1860                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1861                         bnx2_remote_phy_event(bp);
1862                         break;
1863                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1864                 default:
1865                         bnx2_send_heart_beat(bp);
1866                         break;
1867         }
1868         return 0;
1869 }
1870
/* Configure the copper PHY for the requested link settings.
 *
 * With AUTONEG_SPEED set, program the 10/100 and 1000 advertisement
 * registers and restart autonegotiation only if something actually
 * changed.  Otherwise force the requested speed/duplex through BMCR,
 * bouncing the link through loopback first so the partner sees a
 * link-down transition.
 *
 * Called with phy_lock held; temporarily drops it around the msleep()
 * (see the __releases/__acquires annotations).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Current advertisement, masked to the bits we manage,
                 * so the compare below only sees relevant changes.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Build the desired advertisement from bp->advertising. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Only restart autoneg if the advertisement changed or
                 * autoneg is currently disabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced mode: build the BMCR value for the requested speed/duplex. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link status is latched-low; read twice to get the
                 * current state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1969
1970 static int
1971 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1972 __releases(&bp->phy_lock)
1973 __acquires(&bp->phy_lock)
1974 {
1975         if (bp->loopback == MAC_LOOPBACK)
1976                 return 0;
1977
1978         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1979                 return (bnx2_setup_serdes_phy(bp, port));
1980         }
1981         else {
1982                 return (bnx2_setup_copper_phy(bp));
1983         }
1984 }
1985
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S exposes its MII registers through a block-addressing
 * scheme: MII_BNX2_BLK_ADDR selects a register block, then the
 * in-block registers are accessed.  The IEEE-compatible registers live
 * at an offset of 0x10, so the mii_* shortcut addresses are remapped
 * first.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* IEEE registers are offset by 0x10 on this PHY. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Route MDIO accesses to the autoneg MMD. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Force fiber mode instead of auto media detect. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Enable/disable 2.5G advertisement per capability flag. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        /* Clause 73 BAM settings. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave block addressing pointed at the combo IEEE block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2035
/* Initialize the 5708 SerDes PHY: select fiber mode, enable PLL
 * detect, optionally advertise 2.5G, and apply chip-revision and
 * backplane-specific TX amplitude tweaks from NVRAM.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* NVRAM may supply a board-specific TX control value; apply it
         * only for backplane designs.
         */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2093
/* Initialize the 5706 SerDes PHY.  Registers 0x18/0x1c are vendor
 * shadow/expansion registers (magic values inherited from Broadcom
 * reference code); the two branches differ only in whether the
 * extended-packet-length bit is set for jumbo MTUs.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2131
/* Initialize the copper PHY: apply the CRC and early-DAC workarounds
 * when the corresponding phy_flags are set, configure the extended
 * packet length bit according to MTU, and enable ethernet@wirespeed.
 * Registers 0x10/0x17/0x18/0x1c are vendor shadow/DSP registers; the
 * constants come from Broadcom reference code.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* DSP writes (addr via 0x17, data via 0x15) for the CRC fix. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2183
2184
/* Top-level PHY initialization: set default MII register addresses,
 * enable the EMAC link attention, read the PHY ID, dispatch to the
 * chip-specific init routine (skipped entirely for firmware-managed
 * remote PHYs), then program the link via bnx2_setup_phy().
 *
 * Called with phy_lock held; bnx2_setup_phy() may drop and reacquire
 * it.  Returns 0 on success or the first failing step's error code.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 val;
        int rc = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        /* Default IEEE register map; the 5709s init overrides these. */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Remote PHYs are owned by firmware; no local init needed. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        /* Assemble the 32-bit PHY ID from the two ID registers. */
        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        }
        else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
2230
2231 static int
2232 bnx2_set_mac_loopback(struct bnx2 *bp)
2233 {
2234         u32 mac_mode;
2235
2236         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2237         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2238         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2239         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2240         bp->link_up = 1;
2241         return 0;
2242 }
2243
2244 static int bnx2_test_link(struct bnx2 *);
2245
/* Put the PHY into loopback at 1000/full, wait (up to ~1s) for the
 * link to settle, and program the EMAC for GMII with all loopback/
 * force bits cleared.  Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Poll for link-up; proceed regardless after 10 tries. */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2275
/* Send a command to the bootcode through the driver mailbox and, if
 * @ack is set, wait for the firmware to acknowledge it.
 *
 * The driver tags each command with an incrementing sequence number;
 * the firmware echoes that sequence in BNX2_FW_MB when done.  On
 * timeout, a FW_TIMEOUT code is posted back to the firmware and
 * -EBUSY is returned (logged unless @silent).  Returns 0 on success,
 * -EIO if the firmware reported a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 commands don't require a status check. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2321
/* Initialize the 5709 host-based context memory: start the context
 * memory init, wait for it to finish, then program each pre-allocated
 * context page (bp->ctx_blk[]) into the chip's host page table.
 * Returns 0 on success, -EBUSY on a hardware timeout, -ENOMEM if a
 * context page was not allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Kick off context memory init; encode the page size. */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Write the page's DMA address (low word with valid bit,
                 * then high word) and request a page-table write.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll for the write request to complete. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2369
2370 static void
2371 bnx2_init_context(struct bnx2 *bp)
2372 {
2373         u32 vcid;
2374
2375         vcid = 96;
2376         while (vcid) {
2377                 u32 vcid_addr, pcid_addr, offset;
2378                 int i;
2379
2380                 vcid--;
2381
2382                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2383                         u32 new_vcid;
2384
2385                         vcid_addr = GET_PCID_ADDR(vcid);
2386                         if (vcid & 0x8) {
2387                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2388                         }
2389                         else {
2390                                 new_vcid = vcid;
2391                         }
2392                         pcid_addr = GET_PCID_ADDR(new_vcid);
2393                 }
2394                 else {
2395                         vcid_addr = GET_CID_ADDR(vcid);
2396                         pcid_addr = vcid_addr;
2397                 }
2398
2399                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2400                         vcid_addr += (i << PHY_CTX_SHIFT);
2401                         pcid_addr += (i << PHY_CTX_SHIFT);
2402
2403                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2404                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2405
2406                         /* Zero out the context. */
2407                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2408                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2409                 }
2410         }
2411 }
2412
2413 static int
2414 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2415 {
2416         u16 *good_mbuf;
2417         u32 good_mbuf_cnt;
2418         u32 val;
2419
2420         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2421         if (good_mbuf == NULL) {
2422                 printk(KERN_ERR PFX "Failed to allocate memory in "
2423                                     "bnx2_alloc_bad_rbuf\n");
2424                 return -ENOMEM;
2425         }
2426
2427         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2428                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2429
2430         good_mbuf_cnt = 0;
2431
2432         /* Allocate a bunch of mbufs and save the good ones in an array. */
2433         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2434         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2435                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2436                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2437
2438                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2439
2440                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2441
2442                 /* The addresses with Bit 9 set are bad memory blocks. */
2443                 if (!(val & (1 << 9))) {
2444                         good_mbuf[good_mbuf_cnt] = (u16) val;
2445                         good_mbuf_cnt++;
2446                 }
2447
2448                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2449         }
2450
2451         /* Free the good ones back to the mbuf pool thus discarding
2452          * all the bad ones. */
2453         while (good_mbuf_cnt) {
2454                 good_mbuf_cnt--;
2455
2456                 val = good_mbuf[good_mbuf_cnt];
2457                 val = (val << 9) | val | 1;
2458
2459                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2460         }
2461         kfree(good_mbuf);
2462         return 0;
2463 }
2464
2465 static void
2466 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2467 {
2468         u32 val;
2469
2470         val = (mac_addr[0] << 8) | mac_addr[1];
2471
2472         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2473
2474         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2475                 (mac_addr[4] << 8) | mac_addr[5];
2476
2477         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2478 }
2479
2480 static inline int
2481 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2482 {
2483         dma_addr_t mapping;
2484         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2485         struct rx_bd *rxbd =
2486                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2487         struct page *page = alloc_page(GFP_ATOMIC);
2488
2489         if (!page)
2490                 return -ENOMEM;
2491         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2492                                PCI_DMA_FROMDEVICE);
2493         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2494                 __free_page(page);
2495                 return -EIO;
2496         }
2497
2498         rx_pg->page = page;
2499         pci_unmap_addr_set(rx_pg, mapping, mapping);
2500         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2501         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2502         return 0;
2503 }
2504
2505 static void
2506 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2507 {
2508         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2509         struct page *page = rx_pg->page;
2510
2511         if (!page)
2512                 return;
2513
2514         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2515                        PCI_DMA_FROMDEVICE);
2516
2517         __free_page(page);
2518         rx_pg->page = NULL;
2519 }
2520
2521 static inline int
2522 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2523 {
2524         struct sk_buff *skb;
2525         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2526         dma_addr_t mapping;
2527         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2528         unsigned long align;
2529
2530         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2531         if (skb == NULL) {
2532                 return -ENOMEM;
2533         }
2534
2535         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2536                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2537
2538         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2539                 PCI_DMA_FROMDEVICE);
2540         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2541                 dev_kfree_skb(skb);
2542                 return -EIO;
2543         }
2544
2545         rx_buf->skb = skb;
2546         pci_unmap_addr_set(rx_buf, mapping, mapping);
2547
2548         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2549         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2550
2551         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2552
2553         return 0;
2554 }
2555
2556 static int
2557 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2558 {
2559         struct status_block *sblk = bnapi->status_blk.msi;
2560         u32 new_link_state, old_link_state;
2561         int is_set = 1;
2562
2563         new_link_state = sblk->status_attn_bits & event;
2564         old_link_state = sblk->status_attn_bits_ack & event;
2565         if (new_link_state != old_link_state) {
2566                 if (new_link_state)
2567                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2568                 else
2569                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2570         } else
2571                 is_set = 0;
2572
2573         return is_set;
2574 }
2575
/* Service pending PHY attention events under the phy_lock: a link
 * state change triggers bnx2_set_link(), a timer-abort event triggers
 * bnx2_set_remote_link().
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2589
/* Read the current hardware TX consumer index from the status block.
 * When the index lands exactly on the last descriptor slot of a ring
 * page, it is advanced by one (presumably that slot is reserved for a
 * chain/next-page descriptor and never carries a completion --
 * NOTE(review): confirm against the TX ring setup code).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2602
/* Reclaim completed TX descriptors on this NAPI instance's TX ring.
 * Unmaps and frees transmitted skbs (up to @budget packets), advances
 * the software consumer index, and wakes the corresponding netdev TX
 * queue if it was stopped and enough descriptors are free again.
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance maps 1:1 to a netdev TX queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the last BD of this packet; if it
			 * crosses the reserved last slot of a ring page,
			 * account for the extra increment.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the hardware has not yet consumed every
			 * BD of this packet (signed 16-bit ring distance).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Skip past the fragment BDs belonging to this skb. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the TX lock to avoid racing the xmit path. */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2683
/* Recycle @count rx page-ring entries from the consumer side back to
 * the producer side so the hardware can reuse them, instead of
 * allocating fresh pages.
 *
 * If @skb is non-NULL, the caller failed to allocate a replacement for
 * the page in the skb's last frag; that page is first returned to the
 * consumer slot and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page and its DMA address from the consumer
		 * slot to the producer slot when they differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2739
/* Recycle the rx buffer at @cons into the producer slot @prod: the skb
 * keeps its existing DMA mapping, so only the software ring entry and
 * the buffer descriptor address need to be copied over.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header area (synced for the CPU on receive) back to
	 * the device before the buffer is reused.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2769
/* Finish receiving one packet into @skb.
 *
 * @ring_idx packs the rx consumer index in the high 16 bits and the
 * producer index in the low 16 bits.  A replacement buffer is allocated
 * for the producer slot first; if that fails, the current skb (and, for
 * split/jumbo frames, the payload pages) are recycled and an error is
 * returned.  When @hdr_len is non-zero, the payload beyond the header
 * is attached as page frags taken from the page ring.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* @len had 4 trailing bytes stripped (presumably
			 * the FCS -- NOTE(review): confirm); restore them
			 * so the page count matches what was consumed.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only the 4 trailing bytes remain: trim
				 * them off the skb and recycle the unused
				 * pages rather than attach an empty frag.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2868
/* Read the current hardware RX consumer index from the status block.
 * When the index lands exactly on the last descriptor slot of a ring
 * page, it is advanced by one (presumably that slot is reserved for a
 * chain/next-page descriptor -- NOTE(review): confirm against the RX
 * ring setup code).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2881
/* Main NAPI receive handler: process up to @budget packets from this
 * instance's rx ring.  Small packets (<= rx_copy_thresh) are copied
 * into a fresh skb so the original buffer can be recycled cheaply;
 * larger ones are completed via bnx2_rx_skb().  Error frames are
 * dropped by recycling their buffer.  VLAN tags and checksum offload
 * status are applied before handing the skb to the stack.  Finally,
 * the new producer indices are published to the hardware.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area; the rest is either
		 * copied or unmapped later in bnx2_rx_skb().
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr status record to the
		 * packet data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Drop frames with any receive error by recycling them. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4 trailing bytes (presumably the FCS --
		 * NOTE(review): confirm).
		 */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group: re-insert the stripped tag
				 * into the packet data by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3049
3050 /* MSI ISR - The only difference between this and the INTx ISR
3051  * is that the MSI interrupt is always serviced.
3052  */
3053 static irqreturn_t
3054 bnx2_msi(int irq, void *dev_instance)
3055 {
3056         struct bnx2_napi *bnapi = dev_instance;
3057         struct bnx2 *bp = bnapi->bp;
3058
3059         prefetch(bnapi->status_blk.msi);
3060         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3061                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3062                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3063
3064         /* Return here if interrupt is disabled. */
3065         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3066                 return IRQ_HANDLED;
3067
3068         napi_schedule(&bnapi->napi);
3069
3070         return IRQ_HANDLED;
3071 }
3072
3073 static irqreturn_t
3074 bnx2_msi_1shot(int irq, void *dev_instance)
3075 {
3076         struct bnx2_napi *bnapi = dev_instance;
3077         struct bnx2 *bp = bnapi->bp;
3078
3079         prefetch(bnapi->status_blk.msi);
3080
3081         /* Return here if interrupt is disabled. */
3082         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3083                 return IRQ_HANDLED;
3084
3085         napi_schedule(&bnapi->napi);
3086
3087         return IRQ_HANDLED;
3088 }
3089
/* INTx interrupt handler.  The line may be shared, so first determine
 * whether this device actually raised it before acking and scheduling
 * NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing only if NAPI was not
	 * already scheduled.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3128
3129 static inline int
3130 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3131 {
3132         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3133         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3134
3135         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3136             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3137                 return 1;
3138         return 0;
3139 }
3140
3141 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3142                                  STATUS_ATTN_BITS_TIMER_ABORT)
3143
3144 static inline int
3145 bnx2_has_work(struct bnx2_napi *bnapi)
3146 {
3147         struct status_block *sblk = bnapi->status_blk.msi;
3148
3149         if (bnx2_has_fast_work(bnapi))
3150                 return 1;
3151
3152         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3153             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3154                 return 1;
3155
3156         return 0;
3157 }
3158
/* Workaround for missed MSIs: if there is pending work but the status
 * index has not advanced since the previous idle check, pulse the MSI
 * enable bit off and back on, then invoke the MSI handler by hand to
 * kick NAPI processing.  (Presumably called periodically from the
 * driver timer -- NOTE(review): caller not visible in this chunk.)
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not enabled on this device. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Status index unchanged since last check: the MSI was
		 * likely lost, so toggle MSI enable and service manually.
		 */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3180
/* Check the status block for unacknowledged link/timer attention
 * events and, if any are pending, service them via bnx2_phy_int().
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3200
3201 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3202                           int work_done, int budget)
3203 {
3204         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3205         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3206
3207         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3208                 bnx2_tx_int(bp, bnapi, 0);
3209
3210         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3211                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3212
3213         return work_done;
3214 }
3215
/* NAPI poll handler for an MSI-X vector.  Only fast-path TX/RX work
 * is handled here (link events are serviced elsewhere).  When no work
 * remains within budget, completes NAPI and re-arms the vector's
 * interrupt with the latest status index.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Re-enable this vector's interrupt, telling the
			 * hardware how far we have processed.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3242
/* NAPI poll handler for the INTx/MSI (single-vector) case.  Services
 * link events and TX/RX work until the budget is exhausted or no work
 * remains, then completes NAPI and re-enables interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx path: first write acks the status index with
			 * interrupts still masked; the second write (without
			 * MASK_INT) re-enables them -- presumably split to
			 * avoid a re-assertion window (NOTE(review): confirm
			 * against chip documentation).
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3287
3288 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3289  * from set_multicast.
3290  */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct dev_addr_list *uc_ptr;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock serializes rx_mode/sort register updates with the
	 * PHY/link paths that also take it.
	 */
	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we recompute cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in hardware only when no vlan group is attached. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each multicast address into one bit of the
		 * 256-bit filter: low CRC byte selects register (top
		 * 3 bits) and bit position (low 5 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Fall back to promiscuous if there are more secondary unicast
	 * addresses than hardware perfect-match slots.
	 */
	uc_ptr = NULL;
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		uc_ptr = dev->uc_list;

		/* Add all entries to the match filter list */
		for (i = 0; i < dev->uc_count; i++) {
			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			uc_ptr = uc_ptr->next;
		}

	}

	/* Only touch EMAC_RX_MODE when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sorter: clear, load the new mode, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3385
/* Load one RV2P (receive-to-placement) processor image.
 *
 * @rv2p_code:     little-endian 64-bit instruction stream
 * @rv2p_code_len: length of the stream in bytes
 * @rv2p_proc:     RV2P_PROC1 or RV2P_PROC2, selecting the target engine
 */
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	/* 5709 (Xinan): patch PROC2's BD-page-size field in the image
	 * before download.
	 */
	if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
		val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
		val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
		rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
	}

	/* Each instruction is 8 bytes: stage the high then low word,
	 * then latch them into instruction slot i/8 of the selected
	 * processor via the ADDR_CMD register.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3424
/* Download one on-chip CPU's firmware and start it.
 *
 * The CPU is halted, each firmware section (text/data/sbss/bss/rodata)
 * is copied into the scratchpad at the offset implied by its MIPS link
 * address, the PC is set to fw->start_addr, and the CPU is released.
 *
 * Returns 0 on success, or a negative value if the gzip'd text section
 * fails to inflate.
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* Text ships gzip-compressed; inflate into fw->text first. */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		/* NOTE(review): data words are written without le32_to_cpu,
		 * unlike text above — presumably stored in CPU byte order
		 * in the fw tables; confirm against bnx2_fw.h.
		 */
		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3506
/* Load firmware into every on-chip processor (RV2P proc1/proc2, RX,
 * TX, TX patch-up, completion, and command CPUs), selecting the 5709
 * (09) or 5706/5708 (06) images by chip type.
 *
 * A single vmalloc'd scratch buffer is reused as the inflate target
 * for each image in turn.  Returns 0 or a negative errno.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	/* zlib_inflate_blob() returns the decompressed length on success. */
	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* Point the image at the shared scratch buffer; load_cpu_fw()
	 * inflates gz_text into it.
	 */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_com, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg_cp, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3601
/* Transition the device between PCI power states D0 and D3hot.
 *
 * D0: wake the chip (with the mandated delay when leaving D3hot) and
 * clear the magic/ACPI packet modes used for Wake-on-LAN.
 *
 * D3hot: if WoL is enabled, reprogram the PHY for low-speed autoneg,
 * arm magic/ACPI packet reception and all-multicast sorting, keep the
 * EMAC/RPM blocks powered, notify the bootcode, and finally write the
 * PM control register to enter D3hot (optionally with PME enabled).
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state field (-> D0) and write 1 to clear
		 * any latched PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * the link stays up at low power, then restore
			 * the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sorter: broadcast + multicast, programmed with
			 * the usual clear/load/enable sequence.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether we are suspending with WoL. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Program the PM state field: 3 == D3hot.  NOTE(review):
		 * 5706 A0/A1 only enter D3hot when WoL is armed —
		 * presumably a chip erratum; confirm.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3739
3740 static int
3741 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3742 {
3743         u32 val;
3744         int j;
3745
3746         /* Request access to the flash interface. */
3747         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3748         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3749                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3750                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3751                         break;
3752
3753                 udelay(5);
3754         }
3755
3756         if (j >= NVRAM_TIMEOUT_COUNT)
3757                 return -EBUSY;
3758
3759         return 0;
3760 }
3761
3762 static int
3763 bnx2_release_nvram_lock(struct bnx2 *bp)
3764 {
3765         int j;
3766         u32 val;
3767
3768         /* Relinquish nvram interface. */
3769         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3770
3771         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3772                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3773                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3774                         break;
3775
3776                 udelay(5);
3777         }
3778
3779         if (j >= NVRAM_TIMEOUT_COUNT)
3780                 return -EBUSY;
3781
3782         return 0;
3783 }
3784
3785
3786 static int
3787 bnx2_enable_nvram_write(struct bnx2 *bp)
3788 {
3789         u32 val;
3790
3791         val = REG_RD(bp, BNX2_MISC_CFG);
3792         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3793
3794         if (bp->flash_info->flags & BNX2_NV_WREN) {
3795                 int j;
3796
3797                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3798                 REG_WR(bp, BNX2_NVM_COMMAND,
3799                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3800
3801                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3802                         udelay(5);
3803
3804                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3805                         if (val & BNX2_NVM_COMMAND_DONE)
3806                                 break;
3807                 }
3808
3809                 if (j >= NVRAM_TIMEOUT_COUNT)
3810                         return -EBUSY;
3811         }
3812         return 0;
3813 }
3814
3815 static void
3816 bnx2_disable_nvram_write(struct bnx2 *bp)
3817 {
3818         u32 val;
3819
3820         val = REG_RD(bp, BNX2_MISC_CFG);
3821         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3822 }
3823
3824
3825 static void
3826 bnx2_enable_nvram_access(struct bnx2 *bp)
3827 {
3828         u32 val;
3829
3830         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3831         /* Enable both bits, even on read. */
3832         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3833                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3834 }
3835
3836 static void
3837 bnx2_disable_nvram_access(struct bnx2 *bp)
3838 {
3839         u32 val;
3840
3841         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3842         /* Disable both bits, even after read. */
3843         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3844                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3845                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3846 }
3847
/* Erase the flash page containing @offset.
 *
 * Buffered flash needs no explicit erase.  For other parts, issue an
 * ERASE command and poll for DONE.  Returns 0 on success or -EBUSY on
 * timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3887
/* Read one 32-bit word from NVRAM at @offset.
 *
 * @ret_val:   receives 4 bytes in big-endian (flash) byte order
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits from the caller
 *
 * Returns 0 on success or -EBUSY if the command never completes.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Return the word in flash (big-endian) order. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3931
3932
/* Write one 32-bit word to NVRAM at @offset.
 *
 * @val:       4 bytes in big-endian (flash) byte order
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits from the caller
 *
 * Returns 0 on success or -EBUSY if the command never completes.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3976
/* Identify the attached flash/EEPROM and record it in bp->flash_info,
 * then determine the usable flash size.
 *
 * On 5709 the flash spec is fixed.  Otherwise the strapping bits in
 * NVM_CFG1 are matched against flash_table[]; if the interface has not
 * been reconfigured yet, the matching entry's config registers are
 * programmed into the chip.  Returns 0, a lock-acquisition error, or
 * -ENODEV when no table entry matches.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set means the bootcode already reconfigured the
	 * flash interface.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping encoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop matched an entry. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared hw config; fall back to
	 * the table's total_size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4059
4060 static int
4061 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4062                 int buf_size)
4063 {
4064         int rc = 0;
4065         u32 cmd_flags, offset32, len32, extra;
4066
4067         if (buf_size == 0)
4068                 return 0;
4069
4070         /* Request access to the flash interface. */
4071         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4072                 return rc;
4073
4074         /* Enable access to flash interface */
4075         bnx2_enable_nvram_access(bp);
4076
4077         len32 = buf_size;
4078         offset32 = offset;
4079         extra = 0;
4080
4081         cmd_flags = 0;
4082
4083         if (offset32 & 3) {
4084                 u8 buf[4];
4085                 u32 pre_len;
4086
4087                 offset32 &= ~3;
4088                 pre_len = 4 - (offset & 3);
4089
4090                 if (pre_len >= len32) {
4091                         pre_len = len32;
4092                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4093                                     BNX2_NVM_COMMAND_LAST;
4094                 }
4095                 else {
4096                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4097                 }
4098
4099                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4100
4101                 if (rc)
4102                         return rc;
4103
4104                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4105
4106                 offset32 += 4;
4107                 ret_buf += pre_len;
4108                 len32 -= pre_len;
4109         }
4110         if (len32 & 3) {
4111                 extra = 4 - (len32 & 3);
4112                 len32 = (len32 + 4) & ~3;
4113         }
4114
4115         if (len32 == 4) {
4116                 u8 buf[4];
4117
4118                 if (cmd_flags)
4119                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4120                 else
4121                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4122                                     BNX2_NVM_COMMAND_LAST;
4123
4124                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4125
4126                 memcpy(ret_buf, buf, 4 - extra);
4127         }
4128         else if (len32 > 0) {
4129                 u8 buf[4];
4130
4131                 /* Read the first word. */
4132                 if (cmd_flags)
4133                         cmd_flags = 0;
4134                 else
4135                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4136
4137                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4138
4139                 /* Advance to the next dword. */
4140                 offset32 += 4;
4141                 ret_buf += 4;
4142                 len32 -= 4;
4143
4144                 while (len32 > 4 && rc == 0) {
4145                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4146
4147                         /* Advance to the next dword. */
4148                         offset32 += 4;
4149                         ret_buf += 4;
4150                         len32 -= 4;
4151                 }
4152
4153                 if (rc)
4154                         return rc;
4155
4156                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4157                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4158
4159                 memcpy(ret_buf, buf, 4 - extra);
4160         }
4161
4162         /* Disable access to flash interface */
4163         bnx2_disable_nvram_access(bp);
4164
4165         bnx2_release_nvram_lock(bp);
4166
4167         return rc;
4168 }
4169
/* Write @buf_size bytes from @data_buf to NVRAM starting at byte @offset.
 *
 * NVRAM is accessed one dword at a time, and non-buffered flash parts can
 * only be programmed a full page at a time, so any unaligned head/tail
 * bytes are first read back from the flash and merged into an aligned
 * scratch copy before programming.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation
 * failure, or the error from the underlying NVRAM accessors).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: back up to the dword boundary and read the
	 * dword there so its leading bytes can be preserved.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: read the final dword so its trailing bytes can
	 * be preserved.
	 */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build a fully dword-aligned image: preserved head dword, the
	 * caller's data, then the preserved tail dword.
	 */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash must be erased and rewritten a page at a
	 * time, so a page-sized read-back buffer is needed.  264 bytes
	 * covers the largest supported page size -- NOTE(review): confirm
	 * against the driver's flash table.
	 */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Program one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4349
4350 static void
4351 bnx2_init_fw_cap(struct bnx2 *bp)
4352 {
4353         u32 val, sig = 0;
4354
4355         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4356         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4357
4358         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4359                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4360
4361         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4362         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4363                 return;
4364
4365         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4366                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4367                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4368         }
4369
4370         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4371             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4372                 u32 link;
4373
4374                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4375
4376                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4377                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4378                         bp->phy_port = PORT_FIBRE;
4379                 else
4380                         bp->phy_port = PORT_TP;
4381
4382                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4383                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4384         }
4385
4386         if (netif_running(bp->dev) && sig)
4387                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4388 }
4389
/* Map the MSI-X vector table and pending-bit array through the PCI GRC
 * windows so the chip's MSI-X structures are reachable, using separate
 * windows for table and PBA.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4398
/* Perform a coordinated soft reset of the chip.
 *
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason passed to the firmware
 * in the pre- and post-reset handshakes.  The sequence is: quiesce DMA,
 * handshake with firmware, issue the chip-family-specific reset, wait
 * for completion, verify endian configuration, then re-run the firmware
 * capability negotiation and chip-specific fixups.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV on
 * an endian-mode sanity failure, or a firmware-sync error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; the read
		 * flushes the posted write before the settle delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Restore register window/swap config via PCI config space
		 * since memory-mapped state was just reset. */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via the core-reset request bit. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-negotiate firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default remote link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the GRC windows; remap the MSI-X structures. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4504
/* Program the chip after reset: DMA configuration, context memory,
 * internal CPUs, MAC address, MTU, host coalescing parameters and
 * per-vector status blocks, then tell the firmware initialization is
 * complete and enable the chip's engines.
 *
 * Returns 0 on success or a negative errno from context/CPU init or the
 * final firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping and read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA tuning bits -- confirm against
	 * the register specification. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TX DMA to a single channel. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the PCI-X enable-relaxed-ordering bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds below assume at least a standard-size MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	/* Sentinel so the first idle check never matches a real index. */
	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Point the host coalescing block at the status and statistics
	 * blocks (64-bit DMA addresses split into two registers). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing: each register packs the interrupt-mode
	 * value in the high half and the polling-mode value in the low. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Program the per-vector status block configuration for the
	 * additional MSI-X vectors (vector 0 was handled above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	/* Enable all the chip engines; the read flushes the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4725
4726 static void
4727 bnx2_clear_ring_states(struct bnx2 *bp)
4728 {
4729         struct bnx2_napi *bnapi;
4730         struct bnx2_tx_ring_info *txr;
4731         struct bnx2_rx_ring_info *rxr;
4732         int i;
4733
4734         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4735                 bnapi = &bp->bnx2_napi[i];
4736                 txr = &bnapi->tx_ring;
4737                 rxr = &bnapi->rx_ring;
4738
4739                 txr->tx_cons = 0;
4740                 txr->hw_tx_cons = 0;
4741                 rxr->rx_prod_bseq = 0;
4742                 rxr->rx_prod = 0;
4743                 rxr->rx_cons = 0;
4744                 rxr->rx_pg_prod = 0;
4745                 rxr->rx_pg_cons = 0;
4746         }
4747 }
4748
4749 static void
4750 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4751 {
4752         u32 val, offset0, offset1, offset2, offset3;
4753         u32 cid_addr = GET_CID_ADDR(cid);
4754
4755         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4756                 offset0 = BNX2_L2CTX_TYPE_XI;
4757                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4758                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4759                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4760         } else {
4761                 offset0 = BNX2_L2CTX_TYPE;
4762                 offset1 = BNX2_L2CTX_CMD_TYPE;
4763                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4764                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4765         }
4766         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4767         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4768
4769         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4770         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4771
4772         val = (u64) txr->tx_desc_mapping >> 32;
4773         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4774
4775         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4776         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4777 }
4778
4779 static void
4780 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4781 {
4782         struct tx_bd *txbd;
4783         u32 cid = TX_CID;
4784         struct bnx2_napi *bnapi;
4785         struct bnx2_tx_ring_info *txr;
4786
4787         bnapi = &bp->bnx2_napi[ring_num];
4788         txr = &bnapi->tx_ring;
4789
4790         if (ring_num == 0)
4791                 cid = TX_CID;
4792         else
4793                 cid = TX_TSS_CID + ring_num - 1;
4794
4795         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4796
4797         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4798
4799         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4800         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4801
4802         txr->tx_prod = 0;
4803         txr->tx_prod_bseq = 0;
4804
4805         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4806         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4807
4808         bnx2_init_tx_context(bp, cid, txr);
4809 }
4810
4811 static void
4812 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4813                      int num_rings)
4814 {
4815         int i;
4816         struct rx_bd *rxbd;
4817
4818         for (i = 0; i < num_rings; i++) {
4819                 int j;
4820
4821                 rxbd = &rx_ring[i][0];
4822                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4823                         rxbd->rx_bd_len = buf_size;
4824                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4825                 }
4826                 if (i == (num_rings - 1))
4827                         j = 0;
4828                 else
4829                         j = i + 1;
4830                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4831                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4832         }
4833 }
4834
/* Initialize RX ring @ring_num: build the BD chains, program the L2 RX
 * context (including the optional page ring for jumbo frames), populate
 * the ring with receive buffers, then publish the initial producer
 * indices to the hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default RX context; extra rings use RSS CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Zero page-buffer size first; overwritten below if the jumbo
	 * page ring is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Page ring base address (64-bit, split high/low). */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Normal RX BD ring base address (64-bit, split high/low). */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Fill the normal ring with skbs; stop early on failure. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used at runtime to advance the producers. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4914
/* Initialize every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table round-robin over the extra
		 * RX rings, packing four one-byte entries per dword and
		 * writing each dword big-endian as it fills up.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4959
4960 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4961 {
4962         u32 max, num_rings = 1;
4963
4964         while (ring_size > MAX_RX_DESC_CNT) {
4965                 ring_size -= MAX_RX_DESC_CNT;
4966                 num_rings++;
4967         }
4968         /* round to next power of 2 */
4969         max = max_size;
4970         while ((max & num_rings) == 0)
4971                 max >>= 1;
4972
4973         if (num_rings != max)
4974                 max <<= 1;
4975
4976         return max;
4977 }
4978
4979 static void
4980 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4981 {
4982         u32 rx_size, rx_space, jumbo_size;
4983
4984         /* 8 for CRC and VLAN */
4985         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4986
4987         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4988                 sizeof(struct skb_shared_info);
4989
4990         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4991         bp->rx_pg_ring_size = 0;
4992         bp->rx_max_pg_ring = 0;
4993         bp->rx_max_pg_ring_idx = 0;
4994         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4995                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4996
4997                 jumbo_size = size * pages;
4998                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4999                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5000
5001                 bp->rx_pg_ring_size = jumbo_size;
5002                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5003                                                         MAX_RX_PG_RINGS);
5004                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5005                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5006                 bp->rx_copy_thresh = 0;
5007         }
5008
5009         bp->rx_buf_use_size = rx_size;
5010         /* hw alignment */
5011         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5012         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5013         bp->rx_ring_size = size;
5014         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5015         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5016 }
5017
5018 static void
5019 bnx2_free_tx_skbs(struct bnx2 *bp)
5020 {
5021         int i;
5022
5023         for (i = 0; i < bp->num_tx_rings; i++) {
5024                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5025                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5026                 int j;
5027
5028                 if (txr->tx_buf_ring == NULL)
5029                         continue;
5030
5031                 for (j = 0; j < TX_DESC_CNT; ) {
5032                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5033                         struct sk_buff *skb = tx_buf->skb;
5034
5035                         if (skb == NULL) {
5036                                 j++;
5037                                 continue;
5038                         }
5039
5040                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5041
5042                         tx_buf->skb = NULL;
5043
5044                         j += skb_shinfo(skb)->nr_frags + 1;
5045                         dev_kfree_skb(skb);
5046                 }
5047         }
5048 }
5049
5050 static void
5051 bnx2_free_rx_skbs(struct bnx2 *bp)
5052 {
5053         int i;
5054
5055         for (i = 0; i < bp->num_rx_rings; i++) {
5056                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5057                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5058                 int j;
5059
5060                 if (rxr->rx_buf_ring == NULL)
5061                         return;
5062
5063                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5064                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5065                         struct sk_buff *skb = rx_buf->skb;
5066
5067                         if (skb == NULL)
5068                                 continue;
5069
5070                         pci_unmap_single(bp->pdev,
5071                                          pci_unmap_addr(rx_buf, mapping),
5072                                          bp->rx_buf_use_size,
5073                                          PCI_DMA_FROMDEVICE);
5074
5075                         rx_buf->skb = NULL;
5076
5077                         dev_kfree_skb(skb);
5078                 }
5079                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5080                         bnx2_free_rx_page(bp, rxr, j);
5081         }
5082 }
5083
/* Release all TX and RX buffers; called with the hardware quiesced. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
5090
5091 static int
5092 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5093 {
5094         int rc;
5095
5096         rc = bnx2_reset_chip(bp, reset_code);
5097         bnx2_free_skbs(bp);
5098         if (rc)
5099                 return rc;
5100
5101         if ((rc = bnx2_init_chip(bp)) != 0)
5102                 return rc;
5103
5104         bnx2_init_all_rings(bp);
5105         return 0;
5106 }
5107
5108 static int
5109 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5110 {
5111         int rc;
5112
5113         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5114                 return rc;
5115
5116         spin_lock_bh(&bp->phy_lock);
5117         bnx2_init_phy(bp, reset_phy);
5118         bnx2_set_link(bp);
5119         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5120                 bnx2_remote_phy_event(bp);
5121         spin_unlock_bh(&bp->phy_lock);
5122         return 0;
5123 }
5124
5125 static int
5126 bnx2_shutdown_chip(struct bnx2 *bp)
5127 {
5128         u32 reset_code;
5129
5130         if (bp->flags & BNX2_FLAG_NO_WOL)
5131                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5132         else if (bp->wol)
5133                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5134         else
5135                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5136
5137         return bnx2_reset_chip(bp, reset_code);
5138 }
5139
/* Self-test: walk a table of registers, checking that read/write bits
 * toggle and read-only bits keep their value.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Returns 0 on success or
 * -ENODEV on the first failing register (original value is restored).
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;  /* bits that must be writable */
                u32   ro_mask;  /* bits that must be read-only */
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* offset 0xffff terminates the table */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                /* Remember the original value so it can be restored. */
                save_val = readl(bp->regview + offset);

                /* Write all-zeros: writable bits must read back 0 and
                 * read-only bits must be unchanged.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all-ones: writable bits must read back 1 and
                 * read-only bits must still be unchanged.
                 */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
5310
5311 static int
5312 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5313 {
5314         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5315                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5316         int i;
5317
5318         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5319                 u32 offset;
5320
5321                 for (offset = 0; offset < size; offset += 4) {
5322
5323                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5324
5325                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5326                                 test_pattern[i]) {
5327                                 return -ENODEV;
5328                         }
5329                 }
5330         }
5331         return 0;
5332 }
5333
5334 static int
5335 bnx2_test_memory(struct bnx2 *bp)
5336 {
5337         int ret = 0;
5338         int i;
5339         static struct mem_entry {
5340                 u32   offset;
5341                 u32   len;
5342         } mem_tbl_5706[] = {
5343                 { 0x60000,  0x4000 },
5344                 { 0xa0000,  0x3000 },
5345                 { 0xe0000,  0x4000 },
5346                 { 0x120000, 0x4000 },
5347                 { 0x1a0000, 0x4000 },
5348                 { 0x160000, 0x4000 },
5349                 { 0xffffffff, 0    },
5350         },
5351         mem_tbl_5709[] = {
5352                 { 0x60000,  0x4000 },
5353                 { 0xa0000,  0x3000 },
5354                 { 0xe0000,  0x4000 },
5355                 { 0x120000, 0x4000 },
5356                 { 0x1a0000, 0x4000 },
5357                 { 0xffffffff, 0    },
5358         };
5359         struct mem_entry *mem_tbl;
5360
5361         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5362                 mem_tbl = mem_tbl_5709;
5363         else
5364                 mem_tbl = mem_tbl_5706;
5365
5366         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5367                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5368                         mem_tbl[i].len)) != 0) {
5369                         return ret;
5370                 }
5371         }
5372
5373         return ret;
5374 }
5375
5376 #define BNX2_MAC_LOOPBACK       0
5377 #define BNX2_PHY_LOOPBACK       1
5378
5379 static int
5380 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5381 {
5382         unsigned int pkt_size, num_pkts, i;
5383         struct sk_buff *skb, *rx_skb;
5384         unsigned char *packet;
5385         u16 rx_start_idx, rx_idx;
5386         dma_addr_t map;
5387         struct tx_bd *txbd;
5388         struct sw_bd *rx_buf;
5389         struct l2_fhdr *rx_hdr;
5390         int ret = -ENODEV;
5391         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5392         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5393         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5394
5395         tx_napi = bnapi;
5396
5397         txr = &tx_napi->tx_ring;
5398         rxr = &bnapi->rx_ring;
5399         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5400                 bp->loopback = MAC_LOOPBACK;
5401                 bnx2_set_mac_loopback(bp);
5402         }
5403         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5404                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5405                         return 0;
5406
5407                 bp->loopback = PHY_LOOPBACK;
5408                 bnx2_set_phy_loopback(bp);
5409         }
5410         else
5411                 return -EINVAL;
5412
5413         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5414         skb = netdev_alloc_skb(bp->dev, pkt_size);
5415         if (!skb)
5416                 return -ENOMEM;
5417         packet = skb_put(skb, pkt_size);
5418         memcpy(packet, bp->dev->dev_addr, 6);
5419         memset(packet + 6, 0x0, 8);
5420         for (i = 14; i < pkt_size; i++)
5421                 packet[i] = (unsigned char) (i & 0xff);
5422
5423         if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5424                 dev_kfree_skb(skb);
5425                 return -EIO;
5426         }
5427         map = skb_shinfo(skb)->dma_maps[0];
5428
5429         REG_WR(bp, BNX2_HC_COMMAND,
5430                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5431
5432         REG_RD(bp, BNX2_HC_COMMAND);
5433
5434         udelay(5);
5435         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5436
5437         num_pkts = 0;
5438
5439         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5440
5441         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5442         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5443         txbd->tx_bd_mss_nbytes = pkt_size;
5444         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5445
5446         num_pkts++;
5447         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5448         txr->tx_prod_bseq += pkt_size;
5449
5450         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5451         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5452
5453         udelay(100);
5454
5455         REG_WR(bp, BNX2_HC_COMMAND,
5456                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5457
5458         REG_RD(bp, BNX2_HC_COMMAND);
5459
5460         udelay(5);
5461
5462         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5463         dev_kfree_skb(skb);
5464
5465         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5466                 goto loopback_test_done;
5467
5468         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5469         if (rx_idx != rx_start_idx + num_pkts) {
5470                 goto loopback_test_done;
5471         }
5472
5473         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5474         rx_skb = rx_buf->skb;
5475
5476         rx_hdr = (struct l2_fhdr *) rx_skb->data;
5477         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5478
5479         pci_dma_sync_single_for_cpu(bp->pdev,
5480                 pci_unmap_addr(rx_buf, mapping),
5481                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5482
5483         if (rx_hdr->l2_fhdr_status &
5484                 (L2_FHDR_ERRORS_BAD_CRC |
5485                 L2_FHDR_ERRORS_PHY_DECODE |
5486                 L2_FHDR_ERRORS_ALIGNMENT |
5487                 L2_FHDR_ERRORS_TOO_SHORT |
5488                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5489
5490                 goto loopback_test_done;
5491         }
5492
5493         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5494                 goto loopback_test_done;
5495         }
5496
5497         for (i = 14; i < pkt_size; i++) {
5498                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5499                         goto loopback_test_done;
5500                 }
5501         }
5502
5503         ret = 0;
5504
5505 loopback_test_done:
5506         bp->loopback = 0;
5507         return ret;
5508 }
5509
5510 #define BNX2_MAC_LOOPBACK_FAILED        1
5511 #define BNX2_PHY_LOOPBACK_FAILED        2
5512 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5513                                          BNX2_PHY_LOOPBACK_FAILED)
5514
5515 static int
5516 bnx2_test_loopback(struct bnx2 *bp)
5517 {
5518         int rc = 0;
5519
5520         if (!netif_running(bp->dev))
5521                 return BNX2_LOOPBACK_FAILED;
5522
5523         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5524         spin_lock_bh(&bp->phy_lock);
5525         bnx2_init_phy(bp, 1);
5526         spin_unlock_bh(&bp->phy_lock);
5527         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5528                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5529         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5530                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5531         return rc;
5532 }
5533
5534 #define NVRAM_SIZE 0x200
5535 #define CRC32_RESIDUAL 0xdebb20e3
5536
5537 static int
5538 bnx2_test_nvram(struct bnx2 *bp)
5539 {
5540         __be32 buf[NVRAM_SIZE / 4];
5541         u8 *data = (u8 *) buf;
5542         int rc = 0;
5543         u32 magic, csum;
5544
5545         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5546                 goto test_nvram_done;
5547
5548         magic = be32_to_cpu(buf[0]);
5549         if (magic != 0x669955aa) {
5550                 rc = -ENODEV;
5551                 goto test_nvram_done;
5552         }
5553
5554         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5555                 goto test_nvram_done;
5556
5557         csum = ether_crc_le(0x100, data);
5558         if (csum != CRC32_RESIDUAL) {
5559                 rc = -ENODEV;
5560                 goto test_nvram_done;
5561         }
5562
5563         csum = ether_crc_le(0x100, data + 0x100);
5564         if (csum != CRC32_RESIDUAL) {
5565                 rc = -ENODEV;
5566         }
5567
5568 test_nvram_done:
5569         return rc;
5570 }
5571
5572 static int
5573 bnx2_test_link(struct bnx2 *bp)
5574 {
5575         u32 bmsr;
5576
5577         if (!netif_running(bp->dev))
5578                 return -ENODEV;
5579
5580         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5581                 if (bp->link_up)
5582                         return 0;
5583                 return -ENODEV;
5584         }
5585         spin_lock_bh(&bp->phy_lock);
5586         bnx2_enable_bmsr1(bp);
5587         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5588         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5589         bnx2_disable_bmsr1(bp);
5590         spin_unlock_bh(&bp->phy_lock);
5591
5592         if (bmsr & BMSR_LSTATUS) {
5593                 return 0;
5594         }
5595         return -ENODEV;
5596 }
5597
5598 static int
5599 bnx2_test_intr(struct bnx2 *bp)
5600 {
5601         int i;
5602         u16 status_idx;
5603
5604         if (!netif_running(bp->dev))
5605                 return -ENODEV;
5606
5607         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5608
5609         /* This register is not touched during run-time. */
5610         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5611         REG_RD(bp, BNX2_HC_COMMAND);
5612
5613         for (i = 0; i < 10; i++) {
5614                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5615                         status_idx) {
5616
5617                         break;
5618                 }
5619
5620                 msleep_interruptible(10);
5621         }
5622         if (i < 10)
5623                 return 0;
5624
5625         return -ENODEV;
5626 }
5627
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        /* Returns 1 when the 5706 SerDes sees a usable link partner via
         * parallel detection, 0 otherwise (or when parallel detection
         * is disabled).
         */
        u32 mode_ctl, an_dbg, exp;

        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* Shadow-register access: write the selector, then read back. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        /* No signal detected means no link. */
        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        /* Double read - presumably to flush latched bits and obtain the
         * current state; TODO confirm against the PHY documentation.
         */
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5659
/* Periodic SerDes maintenance for the 5706: handles parallel-detect
 * forcing when autoneg finds no partner, reverts to autoneg when the
 * partner starts negotiating, and re-checks link sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* Autoneg was recently restarted; give it time. */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* No autoneg partner but a stable signal: force
                         * 1000 Mb/s full duplex (parallel detection).
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Partner has started autoneg; switch back to autoneg. */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                /* Double read to get the current (unlatched) state. */
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Link reported up but PHY lost sync: force the
                         * link down once, then let set_link recover it.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
5721
/* Periodic SerDes maintenance for the 5708: alternates between forced
 * 2.5G and autoneg while the link is down, for 2.5G-capable PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* Firmware handles the PHY when remote-PHY is in use. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Give a recent autoneg restart time to complete. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg found nothing; try forced 2.5G with a
                         * shorter retry interval.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G failed too; go back to autoneg
                         * and wait two timer ticks before re-checking.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
5754
/* Periodic driver timer: heartbeat to firmware, statistics refresh,
 * missed-MSI workaround, and SerDes link maintenance.  Re-arms itself
 * at bp->current_interval while the device is running.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts are disabled (e.g. during reset); just re-arm. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Plain MSI (not one-shot) can lose edges; check for it. */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5790
5791 static int
5792 bnx2_request_irq(struct bnx2 *bp)
5793 {
5794         unsigned long flags;
5795         struct bnx2_irq *irq;
5796         int rc = 0, i;
5797
5798         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5799                 flags = 0;
5800         else
5801                 flags = IRQF_SHARED;
5802
5803         for (i = 0; i < bp->irq_nvecs; i++) {
5804                 irq = &bp->irq_tbl[i];
5805                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5806                                  &bp->bnx2_napi[i]);
5807                 if (rc)
5808                         break;
5809                 irq->requested = 1;
5810         }
5811         return rc;
5812 }
5813
5814 static void
5815 bnx2_free_irq(struct bnx2 *bp)
5816 {
5817         struct bnx2_irq *irq;
5818         int i;
5819
5820         for (i = 0; i < bp->irq_nvecs; i++) {
5821                 irq = &bp->irq_tbl[i];
5822                 if (irq->requested)
5823                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5824                 irq->requested = 0;
5825         }
5826         if (bp->flags & BNX2_FLAG_USING_MSI)
5827                 pci_disable_msi(bp->pdev);
5828         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5829                 pci_disable_msix(bp->pdev);
5830
5831         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5832 }
5833
/* Try to switch the device to MSI-X with @msix_vecs vectors.  On success,
 * sets BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI and fills irq_tbl;
 * on any failure it returns silently, leaving the caller's INTx setup in
 * irq_tbl[0] intact.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);

        /* Program the chip-side MSI-X table/PBA window registers before
         * asking the PCI core to enable the vectors.
         */
        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;

                /* Per-vector names, e.g. "eth0-0", "eth0-1", ... */
                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
                bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }

        /* All-or-nothing: any non-zero return (error, or fewer vectors
         * available than requested) aborts the MSI-X attempt.
         */
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5864
/* Select the interrupt mode and vector count for the device.  Baseline is
 * a single legacy INTx vector; MSI-X (up to one vector per CPU, capped at
 * RX_MAX_RINGS) or single-vector MSI is used when the hardware supports it
 * and @dis_msi is not set.  Also derives the tx/rx ring counts from the
 * number of vectors obtained.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* Default INTx setup; overwritten below if MSI/MSI-X sticks. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
                bnx2_enable_msix(bp, msix_vecs);

        /* Fall back to plain MSI only when MSI-X was not enabled above. */
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 gets the one-shot MSI handler */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* TX ring count is kept a power of two (rounded down from the
         * vector count); RX gets one ring per vector.
         */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
5898
/* Called with rtnl_lock */
/* net_device open hook: power up the chip, pick the interrupt mode (this
 * determines the tx/rx ring counts), allocate memory, request IRQs and
 * initialize the NIC.  If MSI was chosen, it is verified with a test
 * interrupt and the driver falls back to INTx when the test fails.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        /* Interrupt mode first: it sets num_tx_rings/num_rx_rings used
         * by the allocation and init below.
         */
        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        /* intr_sem == 0 allows interrupt/timer processing to proceed */
        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* dis_msi=1 forces the legacy INTx setup */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        /* Unwind; bnx2_free_irq() only releases vectors actually taken. */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
5975
/* Work-queue handler for bp->reset_task (scheduled from bnx2_tx_timeout):
 * quiesce the interface, re-initialize the chip and restart.
 * NOTE(review): runs bare from the workqueue with no lock visible here —
 * confirm whether rtnl (or other) serialization against open/close is
 * provided elsewhere.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bnx2_netif_stop(bp);

        bnx2_init_nic(bp, 1);

        /* Non-zero intr_sem holds off interrupt-side work (the timer
         * checks it too) until bnx2_netif_start() re-enables things.
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
}
5991
/* net_device watchdog callback: the TX path appears hung.  The actual
 * chip reset is deferred to process context via bp->reset_task
 * (bnx2_reset_task).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
6000
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Attach (or detach, vlgrp == NULL) the 8021q VLAN group.  The NIC is
 * quiesced while the rx mode is reprogrammed, and firmware is notified
 * of the VLAN change when the chip supports keeping VLAN tags.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp);
}
#endif
6018
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: build one tx BD for the linear part of the skb
 * plus one per page fragment, encode checksum/VLAN/TSO information in
 * the BD flags, then ring the doorbell registers.  Returns NETDEV_TX_OK
 * (also on DMA-mapping failure, where the skb is dropped) or
 * NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;
        struct skb_shared_info *sp;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* Need nr_frags + 1 BDs; the queue should have been stopped
         * before this could happen.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                /* VLAN tag is carried in the upper 16 bits of this word */
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* tcp_off != 0 means IPv6 extension headers sit
                         * between the fixed IPv6 header and TCP.
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* Encode the offset in 8-byte units,
                                 * scattered across the flags and mss
                                 * fields as the BD format requires.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                /* IPv4 + TCP option length, in 32-bit words */
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* Map the whole skb (head + frags) for DMA; drop on failure. */
        if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        sp = skb_shinfo(skb);
        mapping = sp->dma_maps[0];

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;

        /* First BD covers the linear part of the skb */
        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One BD per page fragment */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = sp->dma_maps[i + 1];

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD of the packet */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Doorbell: publish the new producer index and byte sequence */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop when nearly full, then re-check availability and wake to
         * close the race with a concurrent bnx2_tx_int() completion.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
}
6158
/* Called with rtnl_lock */
/* net_device stop hook: quiesce all async contexts (reset work, IRQs,
 * NAPI, timer), shut the chip down, free all resources and drop to
 * low power.  Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Ensure a pending reset_task cannot race with the teardown */
        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Low power until the next bnx2_open() */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6179
/* Read a 64-bit hardware statistic kept as a <name>_hi / <name>_lo register
 * pair.  The expansion is fully parenthesized so it stays correctly grouped
 * inside larger expressions (the old form left a trailing "+ lo" outside
 * the casts, which would misgroup under, e.g., multiplication or a cast).
 * Only meaningful where unsigned long is 64 bits wide.
 */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) (ctr##_hi) << 32) +                   \
         (unsigned long) (ctr##_lo))

/* 32-bit longs can only hold the low half; the high word is dropped. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6192
/* net_device get_stats hook: translate the chip's DMA statistics block
 * into struct net_device_stats.  64-bit hardware counters are read via
 * GET_NET_STATS (truncated to the low 32 bits on 32-bit kernels).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &dev->stats;

        /* No stats block (e.g. never opened): report existing totals */
        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        /* "Overrsize" spelling matches the hardware stats field name */
        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* NOTE(review): carrier-sense errors are deliberately reported
         * as 0 on 5706 and 5708 A0 — presumably the counter is
         * unreliable on those chips; confirm against errata.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* stat_FwRxDrop is refreshed periodically by the timer */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
6268
6269 /* All ethtool functions called with rtnl_lock */
6270
/* ethtool get_settings: report supported modes, advertising, and the
 * current link state.  Link fields are sampled under phy_lock so they
 * are read consistently.  Always returns 0.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* Remote-PHY capable devices can run either media */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* -1 == unknown while the link is down */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6329
/* ethtool set_settings: validate and store the requested link settings,
 * then reprogram the PHY if the interface is up.  Works on local copies
 * of autoneg/speed/duplex/advertising so that any validation failure
 * leaves bp->* untouched and returns -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching media is only possible with remote-PHY support */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G: fibre-only and only on capable PHYs */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the media
                         * supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                /* Forcing gigabit+ on copper is not supported */
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Commit the validated values */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6424
6425 static void
6426 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6427 {
6428         struct bnx2 *bp = netdev_priv(dev);
6429
6430         strcpy(info->driver, DRV_MODULE_NAME);
6431         strcpy(info->version, DRV_MODULE_VERSION);
6432         strcpy(info->bus_info, pci_name(bp->pdev));
6433         strcpy(info->fw_version, bp->fw_version);
6434 }
6435
/* Size in bytes of the register dump produced by bnx2_get_regs() */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool hook: buffer size the core must allocate before get_regs */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6443
6444 static void
6445 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6446 {
6447         u32 *p = _p, i, offset;
6448         u8 *orig_p = _p;
6449         struct bnx2 *bp = netdev_priv(dev);
6450         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6451                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6452                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6453                                  0x1040, 0x1048, 0x1080, 0x10a4,
6454                                  0x1400, 0x1490, 0x1498, 0x14f0,
6455                                  0x1500, 0x155c, 0x1580, 0x15dc,
6456                                  0x1600, 0x1658, 0x1680, 0x16d8,
6457                                  0x1800, 0x1820, 0x1840, 0x1854,
6458                                  0x1880, 0x1894, 0x1900, 0x1984,
6459                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6460                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6461                                  0x2000, 0x2030, 0x23c0, 0x2400,
6462                                  0x2800, 0x2820, 0x2830, 0x2850,
6463                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6464                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6465                                  0x4080, 0x4090, 0x43c0, 0x4458,
6466                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6467                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6468                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6469                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6470                                  0x6800, 0x6848, 0x684c, 0x6860,
6471                                  0x6888, 0x6910, 0x8000 };
6472
6473         regs->version = 0;
6474
6475         memset(p, 0, BNX2_REGDUMP_LEN);
6476
6477         if (!netif_running(bp->dev))
6478                 return;
6479
6480         i = 0;
6481         offset = reg_boundaries[0];
6482         p += offset;
6483         while (offset < BNX2_REGDUMP_LEN) {
6484                 *p++ = REG_RD(bp, offset);
6485                 offset += 4;
6486                 if (offset == reg_boundaries[i + 1]) {
6487                         offset = reg_boundaries[i + 2];
6488                         p = (u32 *) (orig_p + offset);
6489                         i += 2;
6490                 }
6491         }
6492 }
6493
6494 static void
6495 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6496 {
6497         struct bnx2 *bp = netdev_priv(dev);
6498
6499         if (bp->flags & BNX2_FLAG_NO_WOL) {
6500                 wol->supported = 0;
6501                 wol->wolopts = 0;
6502         }
6503         else {
6504                 wol->supported = WAKE_MAGIC;
6505                 if (bp->wol)
6506                         wol->wolopts = WAKE_MAGIC;
6507                 else
6508                         wol->wolopts = 0;
6509         }
6510         memset(&wol->sopass, 0, sizeof(wol->sopass));
6511 }
6512
6513 static int
6514 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6515 {
6516         struct bnx2 *bp = netdev_priv(dev);
6517
6518         if (wol->wolopts & ~WAKE_MAGIC)
6519                 return -EINVAL;
6520
6521         if (wol->wolopts & WAKE_MAGIC) {
6522                 if (bp->flags & BNX2_FLAG_NO_WOL)
6523                         return -EINVAL;
6524
6525                 bp->wol = 1;
6526         }
6527         else {
6528                 bp->wol = 0;
6529         }
6530         return 0;
6531 }
6532
/* ethtool nway_reset: restart autonegotiation.  Requires the interface
 * to be up (-EAGAIN) and autoneg enabled (-EINVAL).
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote PHY: firmware owns the PHY, just resend the setup */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping 20ms for the link drop */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes AN timeout; serdes_an_pending is
                 * presumably consumed by the serdes timer path — confirm
                 * in bnx2_5706_serdes_timer()/bnx2_5708_serdes_timer().
                 */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback (set above for serdes) and restart AN */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6578
6579 static int
6580 bnx2_get_eeprom_len(struct net_device *dev)
6581 {
6582         struct bnx2 *bp = netdev_priv(dev);
6583
6584         if (bp->flash_info == NULL)
6585                 return 0;
6586
6587         return (int) bp->flash_size;
6588 }
6589
6590 static int
6591 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6592                 u8 *eebuf)
6593 {
6594         struct bnx2 *bp = netdev_priv(dev);
6595         int rc;
6596
6597         if (!netif_running(dev))
6598                 return -EAGAIN;
6599
6600         /* parameters already validated in ethtool_get_eeprom */
6601
6602         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6603
6604         return rc;
6605 }
6606
6607 static int
6608 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6609                 u8 *eebuf)
6610 {
6611         struct bnx2 *bp = netdev_priv(dev);
6612         int rc;
6613
6614         if (!netif_running(dev))
6615                 return -EAGAIN;
6616
6617         /* parameters already validated in ethtool_set_eeprom */
6618
6619         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6620
6621         return rc;
6622 }
6623
6624 static int
6625 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6626 {
6627         struct bnx2 *bp = netdev_priv(dev);
6628
6629         memset(coal, 0, sizeof(struct ethtool_coalesce));
6630
6631         coal->rx_coalesce_usecs = bp->rx_ticks;
6632         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6633         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6634         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6635
6636         coal->tx_coalesce_usecs = bp->tx_ticks;
6637         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6638         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6639         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6640
6641         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6642
6643         return 0;
6644 }
6645
6646 static int
6647 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6648 {
6649         struct bnx2 *bp = netdev_priv(dev);
6650
6651         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6652         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6653
6654         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6655         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6656
6657         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6658         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6659
6660         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6661         if (bp->rx_quick_cons_trip_int > 0xff)
6662                 bp->rx_quick_cons_trip_int = 0xff;
6663
6664         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6665         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6666
6667         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6668         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6669
6670         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6671         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6672
6673         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6674         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6675                 0xff;
6676
6677         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6678         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6679                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6680                         bp->stats_ticks = USEC_PER_SEC;
6681         }
6682         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6683                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6684         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6685
6686         if (netif_running(bp->dev)) {
6687                 bnx2_netif_stop(bp);
6688                 bnx2_init_nic(bp, 0);
6689                 bnx2_netif_start(bp);
6690         }
6691
6692         return 0;
6693 }
6694
6695 static void
6696 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6697 {
6698         struct bnx2 *bp = netdev_priv(dev);
6699
6700         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6701         ering->rx_mini_max_pending = 0;
6702         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6703
6704         ering->rx_pending = bp->rx_ring_size;
6705         ering->rx_mini_pending = 0;
6706         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6707
6708         ering->tx_max_pending = MAX_TX_DESC_CNT;
6709         ering->tx_pending = bp->tx_ring_size;
6710 }
6711
6712 static int
6713 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6714 {
6715         if (netif_running(bp->dev)) {
6716                 bnx2_netif_stop(bp);
6717                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6718                 bnx2_free_skbs(bp);
6719                 bnx2_free_mem(bp);
6720         }
6721
6722         bnx2_set_rx_ring_size(bp, rx);
6723         bp->tx_ring_size = tx;
6724
6725         if (netif_running(bp->dev)) {
6726                 int rc;
6727
6728                 rc = bnx2_alloc_mem(bp);
6729                 if (rc)
6730                         return rc;
6731                 bnx2_init_nic(bp, 0);
6732                 bnx2_netif_start(bp);
6733         }
6734         return 0;
6735 }
6736
6737 static int
6738 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6739 {
6740         struct bnx2 *bp = netdev_priv(dev);
6741         int rc;
6742
6743         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6744                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6745                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6746
6747                 return -EINVAL;
6748         }
6749         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6750         return rc;
6751 }
6752
6753 static void
6754 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6755 {
6756         struct bnx2 *bp = netdev_priv(dev);
6757
6758         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6759         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6760         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6761 }
6762
6763 static int
6764 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6765 {
6766         struct bnx2 *bp = netdev_priv(dev);
6767
6768         bp->req_flow_ctrl = 0;
6769         if (epause->rx_pause)
6770                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6771         if (epause->tx_pause)
6772                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6773
6774         if (epause->autoneg) {
6775                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6776         }
6777         else {
6778                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6779         }
6780
6781         if (netif_running(dev)) {
6782                 spin_lock_bh(&bp->phy_lock);
6783                 bnx2_setup_phy(bp, bp->phy_port);
6784                 spin_unlock_bh(&bp->phy_lock);
6785         }
6786
6787         return 0;
6788 }
6789
6790 static u32
6791 bnx2_get_rx_csum(struct net_device *dev)
6792 {
6793         struct bnx2 *bp = netdev_priv(dev);
6794
6795         return bp->rx_csum;
6796 }
6797
6798 static int
6799 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6800 {
6801         struct bnx2 *bp = netdev_priv(dev);
6802
6803         bp->rx_csum = data;
6804         return 0;
6805 }
6806
6807 static int
6808 bnx2_set_tso(struct net_device *dev, u32 data)
6809 {
6810         struct bnx2 *bp = netdev_priv(dev);
6811
6812         if (data) {
6813                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6814                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6815                         dev->features |= NETIF_F_TSO6;
6816         } else
6817                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6818                                    NETIF_F_TSO_ECN);
6819         return 0;
6820 }
6821
6822 #define BNX2_NUM_STATS 46
6823
/* Statistic names reported to ethtool.  The order must match
 * bnx2_stats_offset_arr and the bnx2_*_stats_len_arr width tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6874
/* Convert a statistics_block field offset into a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the hardware statistics block, in the
 * same order as bnx2_stats_str_arr.  The _hi entries are the upper words
 * of 64-bit counters; the low word is read from the next offset (see
 * bnx2_get_ethtool_stats()).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6925
6926 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6927  * skipped because of errata.
6928  */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	/* Per-counter width in bytes: 8 = 64-bit, 4 = 32-bit, 0 = skip. */
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6936
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	/* Same layout as the 5706 table, but no counters are skipped. */
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6944
6945 #define BNX2_NUM_TESTS 6
6946
/* Self-test names; indices match the buf[] slots in bnx2_self_test(). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6957
6958 static int
6959 bnx2_get_sset_count(struct net_device *dev, int sset)
6960 {
6961         switch (sset) {
6962         case ETH_SS_TEST:
6963                 return BNX2_NUM_TESTS;
6964         case ETH_SS_STATS:
6965                 return BNX2_NUM_STATS;
6966         default:
6967                 return -EOPNOTSUPP;
6968         }
6969 }
6970
/* Run the ethtool self-tests.  Results land in buf[] in the order of
 * bnx2_tests_str_arr; a nonzero entry means that test failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the chip: stop
		 * traffic, reset into diagnostic mode and drop all buffers.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or shut the chip down if the
		 * device was closed while we were testing.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Drop back to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7029
7030 static void
7031 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7032 {
7033         switch (stringset) {
7034         case ETH_SS_STATS:
7035                 memcpy(buf, bnx2_stats_str_arr,
7036                         sizeof(bnx2_stats_str_arr));
7037                 break;
7038         case ETH_SS_TEST:
7039                 memcpy(buf, bnx2_tests_str_arr,
7040                         sizeof(bnx2_tests_str_arr));
7041                 break;
7042         }
7043 }
7044
7045 static void
7046 bnx2_get_ethtool_stats(struct net_device *dev,
7047                 struct ethtool_stats *stats, u64 *buf)
7048 {
7049         struct bnx2 *bp = netdev_priv(dev);
7050         int i;
7051         u32 *hw_stats = (u32 *) bp->stats_blk;
7052         u8 *stats_len_arr = NULL;
7053
7054         if (hw_stats == NULL) {
7055                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7056                 return;
7057         }
7058
7059         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7060             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7061             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7062             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7063                 stats_len_arr = bnx2_5706_stats_len_arr;
7064         else
7065                 stats_len_arr = bnx2_5708_stats_len_arr;
7066
7067         for (i = 0; i < BNX2_NUM_STATS; i++) {
7068                 if (stats_len_arr[i] == 0) {
7069                         /* skip this counter */
7070                         buf[i] = 0;
7071                         continue;
7072                 }
7073                 if (stats_len_arr[i] == 4) {
7074                         /* 4-byte counter */
7075                         buf[i] = (u64)
7076                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7077                         continue;
7078                 }
7079                 /* 8-byte counter */
7080                 buf[i] = (((u64) *(hw_stats +
7081                                         bnx2_stats_offset_arr[i])) << 32) +
7082                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7083         }
7084 }
7085
7086 static int
7087 bnx2_phys_id(struct net_device *dev, u32 data)
7088 {
7089         struct bnx2 *bp = netdev_priv(dev);
7090         int i;
7091         u32 save;
7092
7093         bnx2_set_power_state(bp, PCI_D0);
7094
7095         if (data == 0)
7096                 data = 2;
7097
7098         save = REG_RD(bp, BNX2_MISC_CFG);
7099         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7100
7101         for (i = 0; i < (data * 2); i++) {
7102                 if ((i % 2) == 0) {
7103                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7104                 }
7105                 else {
7106                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7107                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7108                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7109                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7110                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7111                                 BNX2_EMAC_LED_TRAFFIC);
7112                 }
7113                 msleep_interruptible(500);
7114                 if (signal_pending(current))
7115                         break;
7116         }
7117         REG_WR(bp, BNX2_EMAC_LED, 0);
7118         REG_WR(bp, BNX2_MISC_CFG, save);
7119
7120         if (!netif_running(dev))
7121                 bnx2_set_power_state(bp, PCI_D3hot);
7122
7123         return 0;
7124 }
7125
7126 static int
7127 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7128 {
7129         struct bnx2 *bp = netdev_priv(dev);
7130
7131         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7132                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7133         else
7134                 return (ethtool_op_set_tx_csum(dev, data));
7135 }
7136
/* ethtool entry points; operations not listed here fall back to the
 * kernel defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7167
/* MII register access ioctls.  Called with rtnl_lock held. */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Direct MII access is not possible when the PHY is
		 * managed by remote firmware.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MII accesses with the PHY code. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Register writes require admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7221
7222 /* Called with rtnl_lock */
7223 static int
7224 bnx2_change_mac_addr(struct net_device *dev, void *p)
7225 {
7226         struct sockaddr *addr = p;
7227         struct bnx2 *bp = netdev_priv(dev);
7228
7229         if (!is_valid_ether_addr(addr->sa_data))
7230                 return -EINVAL;
7231
7232         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7233         if (netif_running(dev))
7234                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7235
7236         return 0;
7237 }
7238
7239 /* Called with rtnl_lock */
7240 static int
7241 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7242 {
7243         struct bnx2 *bp = netdev_priv(dev);
7244
7245         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7246                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7247                 return -EINVAL;
7248
7249         dev->mtu = new_mtu;
7250         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7251 }
7252
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: service every interrupt vector with its IRQ masked. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		disable_irq(bp->irq_tbl[vec].vector);
		bnx2_interrupt(bp->irq_tbl[vec].vector, &bp->bnx2_napi[vec]);
		enable_irq(bp->irq_tbl[vec].vector);
	}
}
#endif
7267
7268 static void __devinit
7269 bnx2_get_5709_media(struct bnx2 *bp)
7270 {
7271         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7272         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7273         u32 strap;
7274
7275         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7276                 return;
7277         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7278                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7279                 return;
7280         }
7281
7282         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7283                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7284         else
7285                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7286
7287         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7288                 switch (strap) {
7289                 case 0x4:
7290                 case 0x5:
7291                 case 0x6:
7292                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7293                         return;
7294                 }
7295         } else {
7296                 switch (strap) {
7297                 case 0x1:
7298                 case 0x2:
7299                 case 0x4:
7300                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7301                         return;
7302                 }
7303         }
7304 }
7305
/* Detect the bus type (PCI vs PCI-X), width and clock speed from the
 * chip's misc status registers, recording the results in bp->flags and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock range to a nominal bus speed. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: M66EN pin selects 66 vs 33 MHz. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7357
7358 static int __devinit
7359 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7360 {
7361         struct bnx2 *bp;
7362         unsigned long mem_len;
7363         int rc, i, j;
7364         u32 reg;
7365         u64 dma_mask, persist_dma_mask;
7366
7367         SET_NETDEV_DEV(dev, &pdev->dev);
7368         bp = netdev_priv(dev);
7369
7370         bp->flags = 0;
7371         bp->phy_flags = 0;
7372
7373         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7374         rc = pci_enable_device(pdev);
7375         if (rc) {
7376                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7377                 goto err_out;
7378         }
7379
7380         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7381                 dev_err(&pdev->dev,
7382                         "Cannot find PCI device base address, aborting.\n");
7383                 rc = -ENODEV;
7384                 goto err_out_disable;
7385         }
7386
7387         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7388         if (rc) {
7389                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7390                 goto err_out_disable;
7391         }
7392
7393         pci_set_master(pdev);
7394         pci_save_state(pdev);
7395
7396         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7397         if (bp->pm_cap == 0) {
7398                 dev_err(&pdev->dev,
7399                         "Cannot find power management capability, aborting.\n");
7400                 rc = -EIO;
7401                 goto err_out_release;
7402         }
7403
7404         bp->dev = dev;
7405         bp->pdev = pdev;
7406
7407         spin_lock_init(&bp->phy_lock);
7408         spin_lock_init(&bp->indirect_lock);
7409         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7410
7411         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7412         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7413         dev->mem_end = dev->mem_start + mem_len;
7414         dev->irq = pdev->irq;
7415
7416         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7417
7418         if (!bp->regview) {
7419                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7420                 rc = -ENOMEM;
7421                 goto err_out_release;
7422         }
7423
7424         /* Configure byte swap and enable write to the reg_window registers.
7425          * Rely on CPU to do target byte swapping on big endian systems
7426          * The chip's target access swapping will not swap all accesses
7427          */
7428         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7429                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7430                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7431
7432         bnx2_set_power_state(bp, PCI_D0);
7433
7434         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7435
7436         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7437                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7438                         dev_err(&pdev->dev,
7439                                 "Cannot find PCIE capability, aborting.\n");
7440                         rc = -EIO;
7441                         goto err_out_unmap;
7442                 }
7443                 bp->flags |= BNX2_FLAG_PCIE;
7444                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7445                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7446         } else {
7447                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7448                 if (bp->pcix_cap == 0) {
7449                         dev_err(&pdev->dev,
7450                                 "Cannot find PCIX capability, aborting.\n");
7451                         rc = -EIO;
7452                         goto err_out_unmap;
7453                 }
7454         }
7455
7456         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7457                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7458                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7459         }
7460
7461         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7462                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7463                         bp->flags |= BNX2_FLAG_MSI_CAP;
7464         }
7465
7466         /* 5708 cannot support DMA addresses > 40-bit.  */
7467         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7468                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7469         else
7470                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7471
7472         /* Configure DMA attributes. */
7473         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7474                 dev->features |= NETIF_F_HIGHDMA;
7475                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7476                 if (rc) {
7477                         dev_err(&pdev->dev,
7478                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7479                         goto err_out_unmap;
7480                 }
7481         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7482                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7483                 goto err_out_unmap;
7484         }
7485
7486         if (!(bp->flags & BNX2_FLAG_PCIE))
7487                 bnx2_get_pci_speed(bp);
7488
7489         /* 5706A0 may falsely detect SERR and PERR. */
7490         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7491                 reg = REG_RD(bp, PCI_COMMAND);
7492                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7493                 REG_WR(bp, PCI_COMMAND, reg);
7494         }
7495         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7496                 !(bp->flags & BNX2_FLAG_PCIX)) {
7497
7498                 dev_err(&pdev->dev,
7499                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7500                 goto err_out_unmap;
7501         }
7502
7503         bnx2_init_nvram(bp);
7504
7505         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7506
7507         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7508             BNX2_SHM_HDR_SIGNATURE_SIG) {
7509                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7510
7511                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7512         } else
7513                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7514
7515         /* Get the permanent MAC address.  First we need to make sure the
7516          * firmware is actually running.
7517          */
7518         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7519
7520         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7521             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7522                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7523                 rc = -ENODEV;
7524                 goto err_out_unmap;
7525         }
7526
7527         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7528         for (i = 0, j = 0; i < 3; i++) {
7529                 u8 num, k, skip0;
7530
7531                 num = (u8) (reg >> (24 - (i * 8)));
7532                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7533                         if (num >= k || !skip0 || k == 1) {
7534                                 bp->fw_version[j++] = (num / k) + '0';
7535                                 skip0 = 0;
7536                         }
7537                 }
7538                 if (i != 2)
7539                         bp->fw_version[j++] = '.';
7540         }
7541         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7542         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7543                 bp->wol = 1;
7544
7545         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7546                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7547
7548                 for (i = 0; i < 30; i++) {
7549                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7550                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7551                                 break;
7552                         msleep(10);
7553                 }
7554         }
7555         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7556         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7557         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7558             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7559                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7560
7561                 bp->fw_version[j++] = ' ';
7562                 for (i = 0; i < 3; i++) {
7563                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7564                         reg = swab32(reg);
7565                         memcpy(&bp->fw_version[j], &reg, 4);
7566                         j += 4;
7567                 }
7568         }
7569
7570         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7571         bp->mac_addr[0] = (u8) (reg >> 8);
7572         bp->mac_addr[1] = (u8) reg;
7573
7574         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7575         bp->mac_addr[2] = (u8) (reg >> 24);
7576         bp->mac_addr[3] = (u8) (reg >> 16);
7577         bp->mac_addr[4] = (u8) (reg >> 8);
7578         bp->mac_addr[5] = (u8) reg;
7579
7580         bp->tx_ring_size = MAX_TX_DESC_CNT;
7581         bnx2_set_rx_ring_size(bp, 255);
7582
7583         bp->rx_csum = 1;
7584
7585         bp->tx_quick_cons_trip_int = 20;
7586         bp->tx_quick_cons_trip = 20;
7587         bp->tx_ticks_int = 80;
7588         bp->tx_ticks = 80;
7589
7590         bp->rx_quick_cons_trip_int = 6;
7591         bp->rx_quick_cons_trip = 6;
7592         bp->rx_ticks_int = 18;
7593         bp->rx_ticks = 18;
7594
7595         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7596
7597         bp->current_interval = BNX2_TIMER_INTERVAL;
7598
7599         bp->phy_addr = 1;
7600
7601         /* Disable WOL support if we are running on a SERDES chip. */
7602         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7603                 bnx2_get_5709_media(bp);
7604         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7605                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7606
7607         bp->phy_port = PORT_TP;
7608         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7609                 bp->phy_port = PORT_FIBRE;
7610                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7611                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7612                         bp->flags |= BNX2_FLAG_NO_WOL;
7613                         bp->wol = 0;
7614                 }
7615                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7616                         /* Don't do parallel detect on this board because of
7617                          * some board problems.  The link will not go down
7618                          * if we do parallel detect.
7619                          */
7620                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7621                             pdev->subsystem_device == 0x310c)
7622                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7623                 } else {
7624                         bp->phy_addr = 2;
7625                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7626                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7627                 }
7628         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7629                    CHIP_NUM(bp) == CHIP_NUM_5708)
7630                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7631         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7632                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7633                   CHIP_REV(bp) == CHIP_REV_Bx))
7634                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7635
7636         bnx2_init_fw_cap(bp);
7637
7638         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7639             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7640             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7641             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7642                 bp->flags |= BNX2_FLAG_NO_WOL;
7643                 bp->wol = 0;
7644         }
7645
7646         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7647                 bp->tx_quick_cons_trip_int =
7648                         bp->tx_quick_cons_trip;
7649                 bp->tx_ticks_int = bp->tx_ticks;
7650                 bp->rx_quick_cons_trip_int =
7651                         bp->rx_quick_cons_trip;
7652                 bp->rx_ticks_int = bp->rx_ticks;
7653                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7654                 bp->com_ticks_int = bp->com_ticks;
7655                 bp->cmd_ticks_int = bp->cmd_ticks;
7656         }
7657
7658         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7659          *
7660          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7661          * with byte enables disabled on the unused 32-bit word.  This is legal
7662          * but causes problems on the AMD 8132 which will eventually stop
7663          * responding after a while.
7664          *
7665          * AMD believes this incompatibility is unique to the 5706, and
7666          * prefers to locally disable MSI rather than globally disabling it.
7667          */
7668         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7669                 struct pci_dev *amd_8132 = NULL;
7670
7671                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7672                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7673                                                   amd_8132))) {
7674
7675                         if (amd_8132->revision >= 0x10 &&
7676                             amd_8132->revision <= 0x13) {
7677                                 disable_msi = 1;
7678                                 pci_dev_put(amd_8132);
7679                                 break;
7680                         }
7681                 }
7682         }
7683
7684         bnx2_set_default_link(bp);
7685         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7686
7687         init_timer(&bp->timer);
7688         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7689         bp->timer.data = (unsigned long) bp;
7690         bp->timer.function = bnx2_timer;
7691
7692         return 0;
7693
7694 err_out_unmap:
7695         if (bp->regview) {
7696                 iounmap(bp->regview);
7697                 bp->regview = NULL;
7698         }
7699
7700 err_out_release:
7701         pci_release_regions(pdev);
7702
7703 err_out_disable:
7704         pci_disable_device(pdev);
7705         pci_set_drvdata(pdev, NULL);
7706
7707 err_out:
7708         return rc;
7709 }
7710
7711 static char * __devinit
7712 bnx2_bus_string(struct bnx2 *bp, char *str)
7713 {
7714         char *s = str;
7715
7716         if (bp->flags & BNX2_FLAG_PCIE) {
7717                 s += sprintf(s, "PCI Express");
7718         } else {
7719                 s += sprintf(s, "PCI");
7720                 if (bp->flags & BNX2_FLAG_PCIX)
7721                         s += sprintf(s, "-X");
7722                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7723                         s += sprintf(s, " 32-bit");
7724                 else
7725                         s += sprintf(s, " 64-bit");
7726                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7727         }
7728         return str;
7729 }
7730
7731 static void __devinit
7732 bnx2_init_napi(struct bnx2 *bp)
7733 {
7734         int i;
7735
7736         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7737                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7738                 int (*poll)(struct napi_struct *, int);
7739
7740                 if (i == 0)
7741                         poll = bnx2_poll;
7742                 else
7743                         poll = bnx2_poll_msix;
7744
7745                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7746                 bnapi->bp = bp;
7747         }
7748 }
7749
7750 static const struct net_device_ops bnx2_netdev_ops = {
7751         .ndo_open               = bnx2_open,
7752         .ndo_start_xmit         = bnx2_start_xmit,
7753         .ndo_stop               = bnx2_close,
7754         .ndo_get_stats          = bnx2_get_stats,
7755         .ndo_set_rx_mode        = bnx2_set_rx_mode,
7756         .ndo_do_ioctl           = bnx2_ioctl,
7757         .ndo_validate_addr      = eth_validate_addr,
7758         .ndo_set_mac_address    = bnx2_change_mac_addr,
7759         .ndo_change_mtu         = bnx2_change_mtu,
7760         .ndo_tx_timeout         = bnx2_tx_timeout,
7761 #ifdef BCM_VLAN
7762         .ndo_vlan_rx_register   = bnx2_vlan_rx_register,
7763 #endif
7764 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7765         .ndo_poll_controller    = poll_bnx2,
7766 #endif
7767 };
7768
7769 static int __devinit
7770 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7771 {
7772         static int version_printed = 0;
7773         struct net_device *dev = NULL;
7774         struct bnx2 *bp;
7775         int rc;
7776         char str[40];
7777
7778         if (version_printed++ == 0)
7779                 printk(KERN_INFO "%s", version);
7780
7781         /* dev zeroed in init_etherdev */
7782         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
7783
7784         if (!dev)
7785                 return -ENOMEM;
7786
7787         rc = bnx2_init_board(pdev, dev);
7788         if (rc < 0) {
7789                 free_netdev(dev);
7790                 return rc;
7791         }
7792
7793         dev->netdev_ops = &bnx2_netdev_ops;
7794         dev->watchdog_timeo = TX_TIMEOUT;
7795         dev->ethtool_ops = &bnx2_ethtool_ops;
7796
7797         bp = netdev_priv(dev);
7798         bnx2_init_napi(bp);
7799
7800         pci_set_drvdata(pdev, dev);
7801
7802         memcpy(dev->dev_addr, bp->mac_addr, 6);
7803         memcpy(dev->perm_addr, bp->mac_addr, 6);
7804
7805         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7806         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7807                 dev->features |= NETIF_F_IPV6_CSUM;
7808
7809 #ifdef BCM_VLAN
7810         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7811 #endif
7812         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7813         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7814                 dev->features |= NETIF_F_TSO6;
7815
7816         if ((rc = register_netdev(dev))) {
7817                 dev_err(&pdev->dev, "Cannot register net device\n");
7818                 if (bp->regview)
7819                         iounmap(bp->regview);
7820                 pci_release_regions(pdev);
7821                 pci_disable_device(pdev);
7822                 pci_set_drvdata(pdev, NULL);
7823                 free_netdev(dev);
7824                 return rc;
7825         }
7826
7827         printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7828                 "IRQ %d, node addr %pM\n",
7829                 dev->name,
7830                 board_info[ent->driver_data].name,
7831                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7832                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7833                 bnx2_bus_string(bp, str),
7834                 dev->base_addr,
7835                 bp->pdev->irq, dev->dev_addr);
7836
7837         return 0;
7838 }
7839
/* PCI removal: flush deferred driver work, unregister the netdev, and
 * release the MMIO mapping and PCI resources acquired in
 * bnx2_init_board().  The ordering here matters: work is flushed
 * before unregistering, and the netdev is freed before the PCI
 * resources it was built on are released.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Let any work queued on the shared workqueue finish first */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7858
/* PM suspend hook: save PCI config state unconditionally, then — only
 * if the interface is up — quiesce the NIC, stop the timer, shut the
 * chip down, free posted skbs, and enter the requested power state.
 * Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Flush deferred driver work before taking the device down */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7882
/* PM resume hook: restore PCI config state; if the interface was up,
 * return the chip to D0, reattach the netdev, reinitialize the NIC
 * and restart traffic.  Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7899
7900 /**
7901  * bnx2_io_error_detected - called when PCI error is detected
7902  * @pdev: Pointer to PCI device
7903  * @state: The current pci connection state
7904  *
7905  * This function is called after a PCI bus error affecting
7906  * this device has been detected.
7907  */
7908 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7909                                                pci_channel_state_t state)
7910 {
7911         struct net_device *dev = pci_get_drvdata(pdev);
7912         struct bnx2 *bp = netdev_priv(dev);
7913
7914         rtnl_lock();
7915         netif_device_detach(dev);
7916
7917         if (netif_running(dev)) {
7918                 bnx2_netif_stop(bp);
7919                 del_timer_sync(&bp->timer);
7920                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7921         }
7922
7923         pci_disable_device(pdev);
7924         rtnl_unlock();
7925
7926         /* Request a slot slot reset. */
7927         return PCI_ERS_RESULT_NEED_RESET;
7928 }
7929
7930 /**
7931  * bnx2_io_slot_reset - called after the pci bus has been reset.
7932  * @pdev: Pointer to PCI device
7933  *
7934  * Restart the card from scratch, as if from a cold-boot.
7935  */
7936 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7937 {
7938         struct net_device *dev = pci_get_drvdata(pdev);
7939         struct bnx2 *bp = netdev_priv(dev);
7940
7941         rtnl_lock();
7942         if (pci_enable_device(pdev)) {
7943                 dev_err(&pdev->dev,
7944                         "Cannot re-enable PCI device after reset.\n");
7945                 rtnl_unlock();
7946                 return PCI_ERS_RESULT_DISCONNECT;
7947         }
7948         pci_set_master(pdev);
7949         pci_restore_state(pdev);
7950
7951         if (netif_running(dev)) {
7952                 bnx2_set_power_state(bp, PCI_D0);
7953                 bnx2_init_nic(bp, 1);
7954         }
7955
7956         rtnl_unlock();
7957         return PCI_ERS_RESULT_RECOVERED;
7958 }
7959
7960 /**
7961  * bnx2_io_resume - called when traffic can start flowing again.
7962  * @pdev: Pointer to PCI device
7963  *
7964  * This callback is called when the error recovery driver tells us that
7965  * its OK to resume normal operation.
7966  */
7967 static void bnx2_io_resume(struct pci_dev *pdev)
7968 {
7969         struct net_device *dev = pci_get_drvdata(pdev);
7970         struct bnx2 *bp = netdev_priv(dev);
7971
7972         rtnl_lock();
7973         if (netif_running(dev))
7974                 bnx2_netif_start(bp);
7975
7976         netif_device_attach(dev);
7977         rtnl_unlock();
7978 }
7979
/* PCI error-recovery (AER) callbacks: detect the error, handle the
 * slot reset, then resume traffic.
 */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected = bnx2_io_error_detected,
	.slot_reset     = bnx2_io_slot_reset,
	.resume         = bnx2_io_resume,
};
7985
/* PCI driver descriptor: probe/remove, power management, and error
 * recovery entry points for all devices matched by bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7995
7996 static int __init bnx2_init(void)
7997 {
7998         return pci_register_driver(&bnx2_pci_driver);
7999 }
8000
/* Module unload: unregister the driver from the PCI core. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8005
/* Bind the module load/unload entry points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8008
8009
8010