bnx2: Update version to 1.9.0.
[pandora-kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50 #include <linux/log2.h>
51
52 #include "bnx2.h"
53 #include "bnx2_fw.h"
54 #include "bnx2_fw2.h"
55
/* 64 KiB scratch buffer size used when loading firmware images
 * (users of FW_BUF_SIZE are outside this chunk).
 */
#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "	/* printk prefix: "bnx2: " */
#define DRV_MODULE_VERSION	"1.9.0"
#define DRV_MODULE_RELDATE	"Dec 16, 2008"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to disable MSI and fall back to INTx. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Supported board types.  NOTE: the enumerator order is load-bearing —
 * these values index board_info[] below and are stored in the
 * driver_data field of bnx2_pci_tbl[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
94
/* Human-readable board names, indexed by board_t, above.
 * Keep entries in the exact same order as the board_t enumerators.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
111
/* PCI device ID table.  The HP NC370x entries match on specific
 * subsystem IDs and therefore must come before the catch-all
 * (PCI_ANY_ID) entries for the same device IDs.  driver_data holds
 * the board_t index used to look up board_info[].
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716/5716S — raw IDs, no PCI_DEVICE_ID_NX2_
	 * macro at the time of writing.
	 */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
137
/* NVRAM (flash/EEPROM) device table.  Each entry supplies five
 * chip-specific NVRAM configuration words followed by the flags,
 * page geometry, byte address mask, total size, and a name — see
 * struct flash_spec in bnx2.h for the exact field layout.  The
 * first word encodes the strap value used to identify the part;
 * "Expansion entry" rows are placeholders for straps with no known
 * device.  Selection logic lives elsewhere in this file.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
226
/* The 5709 family has a single known NVRAM configuration, so it uses
 * this fixed spec instead of strap-based lookup in flash_table[].
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
237
/* Return the number of free TX descriptors in @txr.
 *
 * tx_prod and tx_cons are free-running 16-bit style indices, so the
 * raw difference can exceed the ring size after wraparound; the mask
 * below folds it back into 16 bits.  The smp_mb() pairs with the
 * barrier on the consumer side so a fresh tx_cons is observed
 * (NOTE(review): pairing site is outside this chunk — confirm).
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		/* A full page's worth of indices means one unusable
		 * entry per page was skipped; report the ring as full.
		 */
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
255
/* Indirectly read a chip register through the PCICFG register window.
 * The address/data register pair is shared, so accesses are serialized
 * with indirect_lock (BH-safe: also used from softirq context).
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
267
/* Indirectly write @val to chip register @offset through the PCICFG
 * register window.  Serialized with indirect_lock — the window address
 * must not change between the address and data writes.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
276
/* Write a word into the firmware shared-memory region, which lives at
 * bp->shmem_base in the indirectly-addressed register space.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
282
283 static u32
284 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
285 {
286         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
287 }
288
/* Write @val into on-chip context memory for context @cid_addr at
 * @offset.  The 5709 uses a write-request interface that must be
 * polled until the WRITE_REQ bit clears; older chips expose a simple
 * address/data register pair.  Serialized with indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll up to 5 * 5us for the write request to complete;
		 * a timeout is silently ignored.
		 */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
312
/* Read PHY register @reg over the MDIO management interface.
 *
 * If the EMAC is auto-polling the PHY, polling is turned off for the
 * duration of the manual access and restored before returning.
 *
 * Returns 0 on success with the 16-bit register value stored in *val,
 * or -EBUSY (with *val zeroed) if the transaction does not complete
 * within the poll budget (50 * 10us).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to post the write, then let the poller stop. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Build and fire the MDIO read command: PHY address, register,
	 * READ opcode, and START_BUSY to kick off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data and mask it down to
			 * the 16-bit MDIO data field.
			 */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
369
/* Write @val to PHY register @reg over the MDIO management interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * access and restored afterwards.  Returns 0 on success or -EBUSY if
 * the transaction does not complete within the poll budget.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to post the write, then let the poller stop. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* PHY address, register, data, WRITE opcode, and START_BUSY to
	 * kick off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 * 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
418
419 static void
420 bnx2_disable_int(struct bnx2 *bp)
421 {
422         int i;
423         struct bnx2_napi *bnapi;
424
425         for (i = 0; i < bp->irq_nvecs; i++) {
426                 bnapi = &bp->bnx2_napi[i];
427                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
429         }
430         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
431 }
432
433 static void
434 bnx2_enable_int(struct bnx2 *bp)
435 {
436         int i;
437         struct bnx2_napi *bnapi;
438
439         for (i = 0; i < bp->irq_nvecs; i++) {
440                 bnapi = &bp->bnx2_napi[i];
441
442                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
443                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
444                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
445                        bnapi->last_status_idx);
446
447                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
448                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
449                        bnapi->last_status_idx);
450         }
451         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
452 }
453
/* Disable chip interrupts and wait for any in-flight handlers on all
 * vectors to finish.  intr_sem is raised first so a racing
 * bnx2_netif_start() cannot re-enable interrupts until the matching
 * decrement brings it back to zero.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
464
465 static void
466 bnx2_napi_disable(struct bnx2 *bp)
467 {
468         int i;
469
470         for (i = 0; i < bp->irq_nvecs; i++)
471                 napi_disable(&bp->bnx2_napi[i].napi);
472 }
473
474 static void
475 bnx2_napi_enable(struct bnx2 *bp)
476 {
477         int i;
478
479         for (i = 0; i < bp->irq_nvecs; i++)
480                 napi_enable(&bp->bnx2_napi[i].napi);
481 }
482
/* Quiesce the interface: interrupts are disabled and synchronized
 * first so no handler can race with NAPI/TX shutdown.  Balanced by
 * bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
493
494 static void
495 bnx2_netif_start(struct bnx2 *bp)
496 {
497         if (atomic_dec_and_test(&bp->intr_sem)) {
498                 if (netif_running(bp->dev)) {
499                         netif_tx_wake_all_queues(bp->dev);
500                         bnx2_napi_enable(bp);
501                         bnx2_enable_int(bp);
502                 }
503         }
504 }
505
/* Free the TX descriptor rings (DMA-coherent) and the software
 * tx_buf_ring shadow arrays for every TX ring.  Safe to call on
 * partially-allocated state: freed pointers are NULLed and kfree/
 * pci_free_consistent are skipped for NULL rings.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
525
526 static void
527 bnx2_free_rx_mem(struct bnx2 *bp)
528 {
529         int i;
530
531         for (i = 0; i < bp->num_rx_rings; i++) {
532                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
533                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
534                 int j;
535
536                 for (j = 0; j < bp->rx_max_ring; j++) {
537                         if (rxr->rx_desc_ring[j])
538                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
539                                                     rxr->rx_desc_ring[j],
540                                                     rxr->rx_desc_mapping[j]);
541                         rxr->rx_desc_ring[j] = NULL;
542                 }
543                 if (rxr->rx_buf_ring)
544                         vfree(rxr->rx_buf_ring);
545                 rxr->rx_buf_ring = NULL;
546
547                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
548                         if (rxr->rx_pg_desc_ring[j])
549                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
550                                                     rxr->rx_pg_desc_ring[i],
551                                                     rxr->rx_pg_desc_mapping[i]);
552                         rxr->rx_pg_desc_ring[i] = NULL;
553                 }
554                 if (rxr->rx_pg_ring)
555                         vfree(rxr->rx_pg_ring);
556                 rxr->rx_pg_ring = NULL;
557         }
558 }
559
/* Allocate the software tx_buf_ring shadow array and the DMA-coherent
 * TX descriptor ring for every TX ring.
 *
 * Returns 0 on success or -ENOMEM; on failure the caller
 * (bnx2_alloc_mem) is responsible for freeing the partial
 * allocations via bnx2_free_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
581
/* Allocate, for every RX ring: the vmalloc'ed software buffer shadow
 * array, the DMA-coherent RX descriptor pages, and — when jumbo page
 * buffering is in use (rx_pg_ring_size != 0) — the page shadow array
 * and page descriptor pages.
 *
 * Returns 0 on success or -ENOMEM; on failure the caller
 * (bnx2_alloc_mem) frees the partial allocations via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* vmalloc + memset because the shadow array can exceed
		 * kmalloc-friendly sizes for large rings.
		 */
		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* rx_max_pg_ring is 0 when page buffering is off, so this
		 * loop is a no-op in that case.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
630
/* Free everything bnx2_alloc_mem() allocated: TX/RX ring memory, the
 * 5709 context pages, and the combined status+statistics block.  The
 * statistics block shares the status block allocation, so only
 * stats_blk is NULLed alongside the single free.  Safe to call on
 * partially-allocated state (it is the error path of bnx2_alloc_mem).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
656
/* Allocate all host memory the device needs: the combined status +
 * statistics block, per-vector MSI-X status sub-blocks, the 5709
 * context pages, and the RX/TX ring memory.
 *
 * Returns 0 on success or -ENOMEM; on any failure all partial
 * allocations are released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned sub-block per hardware MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base (MSI/INTx-style) status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Vectors 1..N-1 get their own aligned MSI-X sub-blocks
		 * carved out of the same allocation.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Interrupt number field as programmed into the
			 * PCICFG_INT_ACK_CMD register (bits 31:24).
			 */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives immediately after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8 KiB of host context memory, in page-sized chunks. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
733
/* Report the current link state to the bootcode/management firmware
 * via the BNX2_LINK_STATUS word in shared memory.  Skipped entirely
 * when a remote (management-owned) PHY is in control.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed/duplex into the firmware's status format. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice so the second read reflects
			 * current status rather than latched bits.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
792
793 static char *
794 bnx2_xceiver_str(struct bnx2 *bp)
795 {
796         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
797                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
798                  "Copper"));
799 }
800
/* Log the link state, update the netdev carrier flag, and forward the
 * state to firmware.  The unprefixed printk() calls are deliberate
 * continuations that build one logical log line started by the
 * KERN_INFO printk.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
837
/* Derive the flow control settings (bp->flow_ctrl) for the current
 * link, either from the forced configuration or from the autoneg
 * result read out of the PHY.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If speed or flow control is not autonegotiated, use the
	 * administratively requested setting (full duplex links only).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* PAUSE only applies to full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause state directly. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map the 1000BASE-X pause bits onto the copper encoding so the
	 * resolution logic below works for both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
913
/* Fill in bp->line_speed and bp->duplex after the 5709 SerDes PHY
 * reports link up.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Select the GP_STATUS block, read the autoneg result, then
	 * restore the default COMBO_IEEEB0 block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested settings. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
952
/* Fill in bp->line_speed and bp->duplex from the 5708 SerDes PHY's
 * 1000X status register after link up.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
981
/* Fill in link parameters for the 5706 SerDes PHY.  This PHY only
 * runs at 1 Gbps, so only the duplex needs to be resolved.  Always
 * returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	/* Start from the forced BMCR duplex setting. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	/* Autoneg was on: resolve duplex from the common 1000BASE-X
	 * abilities of both link partners.
	 */
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1018
/* Resolve line speed and duplex for a copper PHY after link up, from
 * either the autoneg result or the forced BMCR settings.  Always
 * returns 0; clears bp->link_up if no common mode was negotiated.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check 1000BASE-T first.  The partner's 1000BASE-T
		 * ability bits in MII_STAT1000 sit two bit positions
		 * above the local MII_CTRL1000 advertisement bits,
		 * hence the >> 2 before masking.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common gigabit mode; fall back to the
			 * 10/100 advertisement registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common mode at all: report no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: speed and duplex come from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1084
1085 static void
1086 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1087 {
1088         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1089
1090         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1091         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1092         val |= 0x02 << 8;
1093
1094         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1095                 u32 lo_water, hi_water;
1096
1097                 if (bp->flow_ctrl & FLOW_CTRL_TX)
1098                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1099                 else
1100                         lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1101                 if (lo_water >= bp->rx_ring_size)
1102                         lo_water = 0;
1103
1104                 hi_water = bp->rx_ring_size / 4;
1105
1106                 if (hi_water <= lo_water)
1107                         lo_water = 0;
1108
1109                 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1110                 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1111
1112                 if (hi_water > 0xf)
1113                         hi_water = 0xf;
1114                 else if (hi_water == 0)
1115                         lo_water = 0;
1116                 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1117         }
1118         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1119 }
1120
1121 static void
1122 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1123 {
1124         int i;
1125         u32 cid;
1126
1127         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1128                 if (i == 1)
1129                         cid = RX_RSS_CID;
1130                 bnx2_init_rx_context(bp, cid);
1131         }
1132 }
1133
/* Program the EMAC to match the link parameters (speed, duplex, flow
 * control) that the PHY code has resolved.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* A different EMAC_TX_LENGTHS value is needed for gigabit half
	 * duplex links.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* The 5706 has no 10M port mode and uses
				 * the plain MII setting instead.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* The 5709 rx context watermarks depend on flow control, so
	 * reprogram them on every link change.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1201
1202 static void
1203 bnx2_enable_bmsr1(struct bnx2 *bp)
1204 {
1205         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1206             (CHIP_NUM(bp) == CHIP_NUM_5709))
1207                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1208                                MII_BNX2_BLK_ADDR_GP_STATUS);
1209 }
1210
1211 static void
1212 bnx2_disable_bmsr1(struct bnx2 *bp)
1213 {
1214         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1215             (CHIP_NUM(bp) == CHIP_NUM_5709))
1216                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1217                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1218 }
1219
/* Make sure 2.5 Gbps is advertised when the PHY supports it.
 * Returns 1 if 2.5G was already being advertised, 0 if it was not
 * (it gets enabled here) or if the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On the 5709 the UP1 register is in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1248
/* Stop advertising 2.5 Gbps.  Returns 1 if 2.5G was being advertised
 * (it gets disabled here), 0 if it already was not or if the PHY is
 * not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On the 5709 the UP1 register is in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1274
1275 static void
1276 bnx2_enable_forced_2g5(struct bnx2 *bp)
1277 {
1278         u32 bmcr;
1279
1280         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1281                 return;
1282
1283         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1284                 u32 val;
1285
1286                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1287                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1288                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1289                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1290                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1291                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1292
1293                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1294                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1295                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1296
1297         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1298                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1299                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1300         }
1301
1302         if (bp->autoneg & AUTONEG_SPEED) {
1303                 bmcr &= ~BMCR_ANENABLE;
1304                 if (bp->req_duplex == DUPLEX_FULL)
1305                         bmcr |= BMCR_FULLDPLX;
1306         }
1307         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1308 }
1309
1310 static void
1311 bnx2_disable_forced_2g5(struct bnx2 *bp)
1312 {
1313         u32 bmcr;
1314
1315         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1316                 return;
1317
1318         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1319                 u32 val;
1320
1321                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1323                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1324                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1325                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1326
1327                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1328                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1329                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1330
1331         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1332                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1333                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1334         }
1335
1336         if (bp->autoneg & AUTONEG_SPEED)
1337                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1338         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1339 }
1340
1341 static void
1342 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1343 {
1344         u32 val;
1345
1346         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1347         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1348         if (start)
1349                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1350         else
1351                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1352 }
1353
/* Poll the PHY for the current link state, update bp->link_up and the
 * derived speed/duplex/flow-control values, and reprogram the MAC.
 * Reports the link when its state changes.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is considered always up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* On remote-PHY devices the firmware manages the link. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read the status register twice so the second read reflects
	 * the current state (link-down is latched by the first read).
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On the 5706 SerDes, derive the link status from the EMAC
	 * status and the autoneg debug shadow register instead.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Double read of the shadow register, as above. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: stop forcing 2.5G and leave parallel
		 * detect by re-enabling autoneg.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1437
/* Reset the PHY and wait for the reset bit to self-clear.
 * Returns 0 on success, -EBUSY if the PHY does not come out of reset
 * within PHY_RESET_MAX_WAIT polls.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		/* BMCR_RESET self-clears when the reset completes. */
		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1461
1462 static u32
1463 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1464 {
1465         u32 adv = 0;
1466
1467         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1468                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1469
1470                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1471                         adv = ADVERTISE_1000XPAUSE;
1472                 }
1473                 else {
1474                         adv = ADVERTISE_PAUSE_CAP;
1475                 }
1476         }
1477         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1478                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1479                         adv = ADVERTISE_1000XPSE_ASYM;
1480                 }
1481                 else {
1482                         adv = ADVERTISE_PAUSE_ASYM;
1483                 }
1484         }
1485         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1486                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1487                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1488                 }
1489                 else {
1490                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1491                 }
1492         }
1493         return adv;
1494 }
1495
/* Forward declaration; defined later in this file. */
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1497
/* Translate the requested link settings into a BNX2_NETLINK_SET_LINK
 * argument word and hand it to the firmware, which controls the
 * remote PHY.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: encode every advertised speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware handshake is done without the phy_lock held. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1554
/* Configure the SerDes PHY according to the requested settings:
 * either force a fixed speed/duplex or (re)start autonegotiation.
 * Delegates to the firmware on remote-PHY devices.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Adjust the 2.5G advertisement; if it changed, the link
		 * must be forced down so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced 2.5G mode. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1669
/* All fibre link modes a port may advertise via ethtool; 2.5G is included
 * only when the PHY is 2.5G capable.  NOTE: expands using a local variable
 * named 'bp' — only usable where a 'struct bnx2 *bp' is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All copper link modes advertised via ethtool. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* Masks of MII advertisement register bits for 10/100 and 1000 Mbps. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1684
1685 static void
1686 bnx2_set_default_remote_link(struct bnx2 *bp)
1687 {
1688         u32 link;
1689
1690         if (bp->phy_port == PORT_TP)
1691                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1692         else
1693                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1694
1695         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1696                 bp->req_line_speed = 0;
1697                 bp->autoneg |= AUTONEG_SPEED;
1698                 bp->advertising = ADVERTISED_Autoneg;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1700                         bp->advertising |= ADVERTISED_10baseT_Half;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1702                         bp->advertising |= ADVERTISED_10baseT_Full;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1704                         bp->advertising |= ADVERTISED_100baseT_Half;
1705                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1706                         bp->advertising |= ADVERTISED_100baseT_Full;
1707                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1708                         bp->advertising |= ADVERTISED_1000baseT_Full;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1710                         bp->advertising |= ADVERTISED_2500baseX_Full;
1711         } else {
1712                 bp->autoneg = 0;
1713                 bp->advertising = 0;
1714                 bp->req_duplex = DUPLEX_FULL;
1715                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1716                         bp->req_line_speed = SPEED_10;
1717                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1718                                 bp->req_duplex = DUPLEX_HALF;
1719                 }
1720                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1721                         bp->req_line_speed = SPEED_100;
1722                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1723                                 bp->req_duplex = DUPLEX_HALF;
1724                 }
1725                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1726                         bp->req_line_speed = SPEED_1000;
1727                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1728                         bp->req_line_speed = SPEED_2500;
1729         }
1730 }
1731
1732 static void
1733 bnx2_set_default_link(struct bnx2 *bp)
1734 {
1735         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1736                 bnx2_set_default_remote_link(bp);
1737                 return;
1738         }
1739
1740         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1741         bp->req_line_speed = 0;
1742         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1743                 u32 reg;
1744
1745                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1746
1747                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1748                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1749                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1750                         bp->autoneg = 0;
1751                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1752                         bp->req_duplex = DUPLEX_FULL;
1753                 }
1754         } else
1755                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1756 }
1757
1758 static void
1759 bnx2_send_heart_beat(struct bnx2 *bp)
1760 {
1761         u32 msg;
1762         u32 addr;
1763
1764         spin_lock(&bp->indirect_lock);
1765         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1766         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1767         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1768         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1769         spin_unlock(&bp->indirect_lock);
1770 }
1771
/* Process a link event reported by the firmware-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into link state, speed,
 * duplex, flow control and port type, then reprograms the MAC.  The link
 * is reported only when its up/down state actually changed.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* remember old state for reporting */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        /* Firmware sets this bit when it expects a fresh driver pulse. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxHALF case deliberately falls through to the
                 * matching xxFULL case after overriding the duplex.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                /* Flow control: use the requested (forced) setting unless
                 * both speed and flow control were autonegotiated, in which
                 * case take the firmware-resolved result from the message.
                 */
                bp->flow_ctrl = 0;
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* A port type change invalidates the default link setup. */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1848
1849 static int
1850 bnx2_set_remote_link(struct bnx2 *bp)
1851 {
1852         u32 evt_code;
1853
1854         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1855         switch (evt_code) {
1856                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1857                         bnx2_remote_phy_event(bp);
1858                         break;
1859                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1860                 default:
1861                         bnx2_send_heart_beat(bp);
1862                         break;
1863         }
1864         return 0;
1865 }
1866
/* Program a copper PHY according to bp->autoneg / bp->advertising /
 * bp->req_line_speed / bp->req_duplex.  Caller must hold bp->phy_lock;
 * it is temporarily released around the 50 ms link-down sleep below.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Mask the current advertisements down to the bits this
                 * driver manages, so the compare below is meaningful.
                 */
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Restart autoneg only if the advertisements changed or
                 * autoneg is not currently enabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link status is latched-low; read twice to get the
                 * current state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1963
1964 static int
1965 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1966 {
1967         if (bp->loopback == MAC_LOOPBACK)
1968                 return 0;
1969
1970         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1971                 return (bnx2_setup_serdes_phy(bp, port));
1972         }
1973         else {
1974                 return (bnx2_setup_copper_phy(bp));
1975         }
1976 }
1977
/* Initialize the 5709 SerDes PHY.  The 5709 places its IEEE-like MII
 * registers at an offset of 0x10 inside the AN MMD, so the remapped
 * register numbers are recorded in bp first.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        /* Remapped MII register addresses used by the generic code. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Select the AN MMD via the address expansion register. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        if (reset_phy)
                bnx2_reset_phy(bp);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        /* Force fiber mode; disable media auto-detection. */
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G only when the PHY supports it. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        /* Enable BAM and T2 next-page exchange. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        /* Clause 73 BAM station-manager setup. */
        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address at the combo IEEE block, where the
         * remapped MII registers above live.
         */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
2027
/* Initialize the 5708 SerDes PHY: fiber mode, PLL early-link detect,
 * optional 2.5G advertisement, plus TX amplitude tweaks for early chip
 * revisions and NVRAM-specified backplane TX control.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        /* Fiber mode with auto-detection enabled. */
        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Advertise 2.5G when the PHY supports it. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Apply a board-specific TX control value from NVRAM, but only
         * on backplane designs.
         */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
2085
/* Initialize the 5706 SerDes PHY.  Registers 0x18/0x1c are vendor shadow
 * registers; the magic values come from Broadcom — exact bit semantics
 * are not visible here (NOTE(review): TODO confirm against PHY datasheet).
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        /* Jumbo frames need the extended packet length bit set. */
        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear the extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2123
/* Initialize the copper PHY: apply the CRC-fix DSP sequence and early-DAC
 * workaround where flagged, set/clear the extended packet length bit
 * according to MTU, and enable ethernet@wirespeed.  The 0x15/0x17/0x18
 * writes are vendor shadow/DSP register sequences from Broadcom; exact
 * bit semantics are not visible here.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* DSP write sequence applied only on parts flagged with the
         * CRC fix workaround.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear the extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2175
2176
2177 static int
2178 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2179 {
2180         u32 val;
2181         int rc = 0;
2182
2183         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2184         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2185
2186         bp->mii_bmcr = MII_BMCR;
2187         bp->mii_bmsr = MII_BMSR;
2188         bp->mii_bmsr1 = MII_BMSR;
2189         bp->mii_adv = MII_ADVERTISE;
2190         bp->mii_lpa = MII_LPA;
2191
2192         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2193
2194         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2195                 goto setup_phy;
2196
2197         bnx2_read_phy(bp, MII_PHYSID1, &val);
2198         bp->phy_id = val << 16;
2199         bnx2_read_phy(bp, MII_PHYSID2, &val);
2200         bp->phy_id |= val & 0xffff;
2201
2202         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2203                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2204                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2205                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2206                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2207                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2208                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2209         }
2210         else {
2211                 rc = bnx2_init_copper_phy(bp, reset_phy);
2212         }
2213
2214 setup_phy:
2215         if (!rc)
2216                 rc = bnx2_setup_phy(bp, bp->phy_port);
2217
2218         return rc;
2219 }
2220
2221 static int
2222 bnx2_set_mac_loopback(struct bnx2 *bp)
2223 {
2224         u32 mac_mode;
2225
2226         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2227         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2228         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2229         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2230         bp->link_up = 1;
2231         return 0;
2232 }
2233
2234 static int bnx2_test_link(struct bnx2 *);
2235
2236 static int
2237 bnx2_set_phy_loopback(struct bnx2 *bp)
2238 {
2239         u32 mac_mode;
2240         int rc, i;
2241
2242         spin_lock_bh(&bp->phy_lock);
2243         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2244                             BMCR_SPEED1000);
2245         spin_unlock_bh(&bp->phy_lock);
2246         if (rc)
2247                 return rc;
2248
2249         for (i = 0; i < 10; i++) {
2250                 if (bnx2_test_link(bp) == 0)
2251                         break;
2252                 msleep(100);
2253         }
2254
2255         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2256         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2257                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2258                       BNX2_EMAC_MODE_25G_MODE);
2259
2260         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2261         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2262         bp->link_up = 1;
2263         return 0;
2264 }
2265
/* Post a command to the firmware mailbox, tagged with the next driver
 * sequence number, and optionally wait for the firmware to echo that
 * sequence number back as an acknowledgement.
 *
 * Returns 0 on success or when @ack is not requested; WAIT0 messages
 * also return 0 regardless of the ack outcome.  Returns -EBUSY on ack
 * timeout (after notifying the firmware of the timeout) and -EIO when
 * the ack status is not OK.  @silent suppresses the timeout printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        /* Fold the new sequence number into the message so the ack can
         * be matched to this request.
         */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require a successful ack. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2311
/* Initialize the 5709 context memory: kick the hardware MEM_INIT, then
 * load each host context page's DMA address into the chip's page table
 * and wait for every write request to be consumed.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete in
 * time, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* NOTE(review): the bare (1 << 12) has no named define in
         * bnx2.h — presumably a required command bit; confirm against
         * the chip documentation.
         */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Poll for MEM_INIT to self-clear (up to ~20 us). */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Pages are allocated elsewhere; a missing page is fatal. */
                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Program the 64-bit DMA address of page i and issue the
                 * page table write request.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll for the write request to be consumed (up to ~50 us). */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2359
/* Zero-initialize all 96 connection contexts on pre-5709 chips by
 * mapping each context into the register window and clearing it.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        /* 5706 A0 maps some VCIDs to different physical
                         * CIDs — presumably a hardware erratum workaround;
                         * the remap moves VCIDs with bit 3 set into the
                         * 0x60+ range.
                         */
                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* A context spans several physical pages; map and clear
                 * each page in turn.
                 */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2402
/* Take bad RX buffer blocks out of circulation: allocate every free mbuf
 * from the chip's pool, remember which ones are good, then free only the
 * good ones back.  The bad blocks (bit 9 set in the allocated address)
 * stay allocated forever and are never handed to the RX path.
 *
 * Returns 0 on success, -ENOMEM if the bookkeeping array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                                BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                /* Encode the free command for this buffer.  NOTE(review):
                 * the (val << 9) | val | 1 packing mirrors the hardware's
                 * FW_BUF_FREE format; exact field meaning not visible here.
                 */
                val = (val << 9) | val | 1;

                bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2454
2455 static void
2456 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2457 {
2458         u32 val;
2459
2460         val = (mac_addr[0] << 8) | mac_addr[1];
2461
2462         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2463
2464         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2465                 (mac_addr[4] << 8) | mac_addr[5];
2466
2467         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2468 }
2469
2470 static inline int
2471 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2472 {
2473         dma_addr_t mapping;
2474         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2475         struct rx_bd *rxbd =
2476                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2477         struct page *page = alloc_page(GFP_ATOMIC);
2478
2479         if (!page)
2480                 return -ENOMEM;
2481         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2482                                PCI_DMA_FROMDEVICE);
2483         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2484                 __free_page(page);
2485                 return -EIO;
2486         }
2487
2488         rx_pg->page = page;
2489         pci_unmap_addr_set(rx_pg, mapping, mapping);
2490         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2491         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2492         return 0;
2493 }
2494
2495 static void
2496 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2497 {
2498         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2499         struct page *page = rx_pg->page;
2500
2501         if (!page)
2502                 return;
2503
2504         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2505                        PCI_DMA_FROMDEVICE);
2506
2507         __free_page(page);
2508         rx_pg->page = NULL;
2509 }
2510
2511 static inline int
2512 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2513 {
2514         struct sk_buff *skb;
2515         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2516         dma_addr_t mapping;
2517         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2518         unsigned long align;
2519
2520         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2521         if (skb == NULL) {
2522                 return -ENOMEM;
2523         }
2524
2525         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2526                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2527
2528         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2529                 PCI_DMA_FROMDEVICE);
2530         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2531                 dev_kfree_skb(skb);
2532                 return -EIO;
2533         }
2534
2535         rx_buf->skb = skb;
2536         pci_unmap_addr_set(rx_buf, mapping, mapping);
2537
2538         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2539         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2540
2541         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2542
2543         return 0;
2544 }
2545
2546 static int
2547 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2548 {
2549         struct status_block *sblk = bnapi->status_blk.msi;
2550         u32 new_link_state, old_link_state;
2551         int is_set = 1;
2552
2553         new_link_state = sblk->status_attn_bits & event;
2554         old_link_state = sblk->status_attn_bits_ack & event;
2555         if (new_link_state != old_link_state) {
2556                 if (new_link_state)
2557                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2558                 else
2559                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2560         } else
2561                 is_set = 0;
2562
2563         return is_set;
2564 }
2565
/* Service latched PHY attention events under the phy_lock: refresh the
 * local link state and/or the remote-PHY link state, depending on
 * which event bits changed.  Called from NAPI poll (bnx2_poll_link).
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2579
/* Read the hardware tx consumer index from the status block.  When the
 * index lands on the last slot of a ring page (MAX_TX_DESC_CNT), it is
 * bumped past it — presumably that slot is not a usable descriptor
 * (ring chaining); confirm against the descriptor layout in bnx2.h.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2592
/* Reclaim completed tx descriptors for this napi instance's tx ring:
 * unmap and free up to 'budget' completed skbs (a budget of 0
 * effectively means no limit, as used by bnx2_poll_work()), then wake
 * the tx queue if enough ring space has been freed.  Returns the
 * number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi maps 1:1 onto a netdev tx queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer the whole packet until its last bd has
			 * completed (signed compare is wraparound-safe).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Skip the frag bds, then the final bd of this packet. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to close the race with a
		 * concurrent xmit path stopping the queue.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2673
/* Recycle 'count' rx pages from the consumer side of the page ring
 * back to the producer side without allocating replacements — used on
 * allocation failure so the ring never loses pages.  If 'skb' is
 * non-NULL, its last frag page is first returned to the ring and the
 * skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page plus its DMA mapping and descriptor
		 * address from the consumer slot to the producer slot,
		 * unless they are the same slot already.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2729
/* Recycle an rx skb from ring slot 'cons' to slot 'prod', reusing its
 * existing DMA mapping, so the hardware can refill the buffer.  Used
 * when a packet is copied out, dropped, or a replacement buffer could
 * not be allocated.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header area (synced for CPU in bnx2_rx_int) back
	 * to the device before the buffer is reused.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2759
/* Finalize the skb for a received packet.  'ring_idx' packs the
 * consumer index in its high 16 bits and the producer index in its low
 * 16 bits.  For split/jumbo packets (hdr_len != 0) the first hdr_len
 * bytes stay in the skb's linear area and the remainder is assembled
 * from page-ring fragments.  Returns 0 on success or a negative errno,
 * in which case all buffers are recycled and the packet is dropped.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* No replacement buffer: recycle this one (and any
		 * pages a split packet would have consumed) and drop.
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* The +4 restores the 4 trailing bytes stripped by the
		 * caller (presumably the FCS); they are trimmed again
		 * from the last fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only trailer bytes remain: back them
				 * out of the skb accounting and recycle
				 * the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2858
/* Read the hardware rx consumer index from the status block.  When the
 * index lands on the last slot of a ring page (MAX_RX_DESC_CNT), it is
 * bumped past it — presumably that slot is not a usable descriptor
 * (ring chaining); confirm against the descriptor layout in bnx2.h.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
2871
/* Process up to 'budget' received packets: validate the frame status,
 * copy small packets into fresh skbs (recycling the ring buffer) or
 * hand large/split ones to bnx2_rx_skb(), apply VLAN and checksum
 * offload results, and deliver to the stack.  Returns the number of
 * packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Make the frame header visible to the CPU; the full
		 * buffer is only unmapped if the packet is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip places an l2_fhdr status header in front of
		 * the frame data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Recycle (drop) frames flagged with any rx error. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		/* Split or jumbo frames spill into the page ring. */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		len -= 4;	/* strip the 4 trailing bytes (FCS, presumably) */

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy it out and recycle the
			 * ring buffer in place.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No vlan group registered: re-insert
				 * the stripped tag into the frame data.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only for TCP/UDP frames
		 * with no checksum error bits set.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Keep the MMIO writes ordered on weakly-ordered platforms. */
	mmiowb();

	return rx_pkt;

}
3037
3038 /* MSI ISR - The only difference between this and the INTx ISR
3039  * is that the MSI interrupt is always serviced.
3040  */
3041 static irqreturn_t
3042 bnx2_msi(int irq, void *dev_instance)
3043 {
3044         struct bnx2_napi *bnapi = dev_instance;
3045         struct bnx2 *bp = bnapi->bp;
3046         struct net_device *dev = bp->dev;
3047
3048         prefetch(bnapi->status_blk.msi);
3049         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3050                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3051                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3052
3053         /* Return here if interrupt is disabled. */
3054         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3055                 return IRQ_HANDLED;
3056
3057         netif_rx_schedule(dev, &bnapi->napi);
3058
3059         return IRQ_HANDLED;
3060 }
3061
3062 static irqreturn_t
3063 bnx2_msi_1shot(int irq, void *dev_instance)
3064 {
3065         struct bnx2_napi *bnapi = dev_instance;
3066         struct bnx2 *bp = bnapi->bp;
3067         struct net_device *dev = bp->dev;
3068
3069         prefetch(bnapi->status_blk.msi);
3070
3071         /* Return here if interrupt is disabled. */
3072         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3073                 return IRQ_HANDLED;
3074
3075         netif_rx_schedule(dev, &bnapi->napi);
3076
3077         return IRQ_HANDLED;
3078 }
3079
/* INTx (possibly shared-line) interrupt handler. */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	/* Mask further interrupts; the NAPI poll path re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Latch the status index only when we win the right to poll. */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
3119
3120 static inline int
3121 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3122 {
3123         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3124         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3125
3126         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3127             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3128                 return 1;
3129         return 0;
3130 }
3131
3132 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3133                                  STATUS_ATTN_BITS_TIMER_ABORT)
3134
3135 static inline int
3136 bnx2_has_work(struct bnx2_napi *bnapi)
3137 {
3138         struct status_block *sblk = bnapi->status_blk.msi;
3139
3140         if (bnx2_has_fast_work(bnapi))
3141                 return 1;
3142
3143         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3144             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3145                 return 1;
3146
3147         return 0;
3148 }
3149
/* Detect a missed MSI: if work is pending but the status index has not
 * advanced since the previous idle check, pulse the MSI enable bit to
 * rearm the interrupt and invoke the MSI handler by hand.  (Appears to
 * work around hardware occasionally dropping an MSI — assumption based
 * on the register dance; confirm against the chip errata.)
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to recover unless MSI is actually enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off/on to rearm it. */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3171
3172 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3173 {
3174         struct status_block *sblk = bnapi->status_blk.msi;
3175         u32 status_attn_bits = sblk->status_attn_bits;
3176         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3177
3178         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3179             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3180
3181                 bnx2_phy_int(bp, bnapi);
3182
3183                 /* This is needed to take care of transient status
3184                  * during link changes.
3185                  */
3186                 REG_WR(bp, BNX2_HC_COMMAND,
3187                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3188                 REG_RD(bp, BNX2_HC_COMMAND);
3189         }
3190 }
3191
/* Do one round of fast-path work: reclaim all completed tx packets
 * (tx is not charged against the NAPI budget), then receive packets
 * up to the remaining budget.  Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3206
/* NAPI poll handler for MSI-X vectors: fast-path rx/tx work only, no
 * link attention handling (that stays on the base vector's handler).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			netif_rx_complete(bp->dev, napi);
			/* Ack up to last_status_idx and re-enable this
			 * vector's interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3233
/* NAPI poll handler for INTx/MSI: link events plus rx/tx fast-path
 * work on the base vector.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack re-enables. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first update the index with interrupts
			 * still masked, then write again without
			 * MASK_INT to re-enable them.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3278
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode and the RPM sort-user0 filter from the
 * netdev flags and the device's multicast/unicast address lists:
 * promiscuous, all-multicast, multicast hash filtering, and up to
 * BNX2_MAX_UNICAST_ADDRESSES perfect-match unicast entries.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct dev_addr_list *uc_ptr;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and keep-VLAN
	 * bits cleared; both are re-derived below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Leave VLAN tags in received frames only while no vlan group is
	 * registered.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each multicast address: the low CRC byte selects one
		 * of 256 filter bits spread over 8 32-bit registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many secondary unicast addresses for the perfect-match
	 * filters: fall back to promiscuous mode.
	 */
	uc_ptr = NULL;
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		uc_ptr = dev->uc_list;

		/* Add all entries into to the match filter list */
		for (i = 0; i < dev->uc_count; i++) {
			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			uc_ptr = uc_ptr->next;
		}

	}

	/* Only touch the EMAC RX mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Update the sort filter: clear, program, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3376
/* load_rv2p_fw - download microcode into one of the two RV2P processors.
 * @rv2p_code: decompressed image; each instruction is 64 bits wide and
 *	stored as two consecutive __le32 words (high word first).
 * @rv2p_code_len: image length in bytes.
 * @rv2p_proc: RV2P_PROC1 or RV2P_PROC2.
 *
 * The processor is left in reset; it is un-stalled later during chip
 * initialization.
 */
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	/* For PROC2 on the 5709, patch the BD page size field of the
	 * image in place before downloading it.
	 */
	if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
		val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
		val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
		rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
	}

	/* Write one 64-bit instruction (8 bytes) per iteration, then
	 * commit it to instruction slot i/8 of the selected processor.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3415
/* load_cpu_fw - download a firmware image into one on-chip RISC CPU.
 * @cpu_reg: register layout (mode/state/pc/...) of the target CPU.
 * @fw: firmware image; fw->text must point to a FW_BUF_SIZE scratch
 *	buffer into which the gzipped text section is decompressed.
 *
 * Halts the CPU, copies the text/data/sbss/bss/rodata sections into the
 * CPU's scratchpad window, sets the program counter to the entry point
 * and restarts the CPU.  Text words are converted from little-endian;
 * data and rodata words are written exactly as stored in the image.
 *
 * Returns 0 on success or a negative error from zlib_inflate_blob().
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area.  Section addresses are in the CPU's MIPS
	 * view; translate them into the scratchpad window.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU: clear its state, then drop the halt bit. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3497
3498 static int
3499 bnx2_init_cpus(struct bnx2 *bp)
3500 {
3501         struct fw_info *fw;
3502         int rc, rv2p_len;
3503         void *text, *rv2p;
3504
3505         /* Initialize the RV2P processor. */
3506         text = vmalloc(FW_BUF_SIZE);
3507         if (!text)
3508                 return -ENOMEM;
3509         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3510                 rv2p = bnx2_xi_rv2p_proc1;
3511                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3512         } else {
3513                 rv2p = bnx2_rv2p_proc1;
3514                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3515         }
3516         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3517         if (rc < 0)
3518                 goto init_cpu_err;
3519
3520         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3521
3522         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3523                 rv2p = bnx2_xi_rv2p_proc2;
3524                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3525         } else {
3526                 rv2p = bnx2_rv2p_proc2;
3527                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3528         }
3529         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3530         if (rc < 0)
3531                 goto init_cpu_err;
3532
3533         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3534
3535         /* Initialize the RX Processor. */
3536         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3537                 fw = &bnx2_rxp_fw_09;
3538         else
3539                 fw = &bnx2_rxp_fw_06;
3540
3541         fw->text = text;
3542         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3543         if (rc)
3544                 goto init_cpu_err;
3545
3546         /* Initialize the TX Processor. */
3547         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3548                 fw = &bnx2_txp_fw_09;
3549         else
3550                 fw = &bnx2_txp_fw_06;
3551
3552         fw->text = text;
3553         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3554         if (rc)
3555                 goto init_cpu_err;
3556
3557         /* Initialize the TX Patch-up Processor. */
3558         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3559                 fw = &bnx2_tpat_fw_09;
3560         else
3561                 fw = &bnx2_tpat_fw_06;
3562
3563         fw->text = text;
3564         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3565         if (rc)
3566                 goto init_cpu_err;
3567
3568         /* Initialize the Completion Processor. */
3569         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3570                 fw = &bnx2_com_fw_09;
3571         else
3572                 fw = &bnx2_com_fw_06;
3573
3574         fw->text = text;
3575         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3576         if (rc)
3577                 goto init_cpu_err;
3578
3579         /* Initialize the Command Processor. */
3580         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3581                 fw = &bnx2_cp_fw_09;
3582         else
3583                 fw = &bnx2_cp_fw_06;
3584
3585         fw->text = text;
3586         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3587
3588 init_cpu_err:
3589         vfree(text);
3590         return rc;
3591 }
3592
/* bnx2_set_power_state - transition the device to D0 or D3hot.
 *
 * D0: clears the PM state bits in PMCSR (with the mandatory delay when
 * leaving D3hot) and undoes the magic-packet/ACPI receive settings.
 *
 * D3hot: when WoL is enabled, temporarily renegotiates a 10/100 link on
 * copper ports, programs the MAC for magic-packet reception, accepts
 * broadcast and all multicast, and keeps the EMAC/RPM blocks running;
 * then notifies the bootcode and writes the new PM state, setting PME
 * enable when WoL is armed.
 *
 * Returns 0 on success, -EINVAL for any other target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packet and stop looking for
		 * magic packets now that we are fully powered.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; they are restored below
			 * after the low-speed WoL link is set up.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort filter: broadcast + multicast only. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (unless WoL is
		 * not supported at all on this board).
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* On 5706 A0/A1 the D3hot state bits are only set
			 * when WoL is armed.  NOTE(review): reason not
			 * visible here -- likely an early-chip erratum;
			 * confirm before changing.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3730
3731 static int
3732 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3733 {
3734         u32 val;
3735         int j;
3736
3737         /* Request access to the flash interface. */
3738         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3739         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3740                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3741                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3742                         break;
3743
3744                 udelay(5);
3745         }
3746
3747         if (j >= NVRAM_TIMEOUT_COUNT)
3748                 return -EBUSY;
3749
3750         return 0;
3751 }
3752
3753 static int
3754 bnx2_release_nvram_lock(struct bnx2 *bp)
3755 {
3756         int j;
3757         u32 val;
3758
3759         /* Relinquish nvram interface. */
3760         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3761
3762         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3763                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3764                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3765                         break;
3766
3767                 udelay(5);
3768         }
3769
3770         if (j >= NVRAM_TIMEOUT_COUNT)
3771                 return -EBUSY;
3772
3773         return 0;
3774 }
3775
3776
3777 static int
3778 bnx2_enable_nvram_write(struct bnx2 *bp)
3779 {
3780         u32 val;
3781
3782         val = REG_RD(bp, BNX2_MISC_CFG);
3783         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3784
3785         if (bp->flash_info->flags & BNX2_NV_WREN) {
3786                 int j;
3787
3788                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3789                 REG_WR(bp, BNX2_NVM_COMMAND,
3790                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3791
3792                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3793                         udelay(5);
3794
3795                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3796                         if (val & BNX2_NVM_COMMAND_DONE)
3797                                 break;
3798                 }
3799
3800                 if (j >= NVRAM_TIMEOUT_COUNT)
3801                         return -EBUSY;
3802         }
3803         return 0;
3804 }
3805
3806 static void
3807 bnx2_disable_nvram_write(struct bnx2 *bp)
3808 {
3809         u32 val;
3810
3811         val = REG_RD(bp, BNX2_MISC_CFG);
3812         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3813 }
3814
3815
3816 static void
3817 bnx2_enable_nvram_access(struct bnx2 *bp)
3818 {
3819         u32 val;
3820
3821         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3822         /* Enable both bits, even on read. */
3823         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3824                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3825 }
3826
3827 static void
3828 bnx2_disable_nvram_access(struct bnx2 *bp)
3829 {
3830         u32 val;
3831
3832         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3833         /* Disable both bits, even after read. */
3834         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3835                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3836                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3837 }
3838
3839 static int
3840 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3841 {
3842         u32 cmd;
3843         int j;
3844
3845         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3846                 /* Buffered flash, no erase needed */
3847                 return 0;
3848
3849         /* Build an erase command */
3850         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3851               BNX2_NVM_COMMAND_DOIT;
3852
3853         /* Need to clear DONE bit separately. */
3854         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3855
3856         /* Address of the NVRAM to read from. */
3857         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3858
3859         /* Issue an erase command. */
3860         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3861
3862         /* Wait for completion. */
3863         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3864                 u32 val;
3865
3866                 udelay(5);
3867
3868                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3869                 if (val & BNX2_NVM_COMMAND_DONE)
3870                         break;
3871         }
3872
3873         if (j >= NVRAM_TIMEOUT_COUNT)
3874                 return -EBUSY;
3875
3876         return 0;
3877 }
3878
3879 static int
3880 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3881 {
3882         u32 cmd;
3883         int j;
3884
3885         /* Build the command word. */
3886         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3887
3888         /* Calculate an offset of a buffered flash, not needed for 5709. */
3889         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3890                 offset = ((offset / bp->flash_info->page_size) <<
3891                            bp->flash_info->page_bits) +
3892                           (offset % bp->flash_info->page_size);
3893         }
3894
3895         /* Need to clear DONE bit separately. */
3896         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3897
3898         /* Address of the NVRAM to read from. */
3899         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3900
3901         /* Issue a read command. */
3902         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3903
3904         /* Wait for completion. */
3905         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3906                 u32 val;
3907
3908                 udelay(5);
3909
3910                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3911                 if (val & BNX2_NVM_COMMAND_DONE) {
3912                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3913                         memcpy(ret_val, &v, 4);
3914                         break;
3915                 }
3916         }
3917         if (j >= NVRAM_TIMEOUT_COUNT)
3918                 return -EBUSY;
3919
3920         return 0;
3921 }
3922
3923
3924 static int
3925 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3926 {
3927         u32 cmd;
3928         __be32 val32;
3929         int j;
3930
3931         /* Build the command word. */
3932         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3933
3934         /* Calculate an offset of a buffered flash, not needed for 5709. */
3935         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3936                 offset = ((offset / bp->flash_info->page_size) <<
3937                           bp->flash_info->page_bits) +
3938                          (offset % bp->flash_info->page_size);
3939         }
3940
3941         /* Need to clear DONE bit separately. */
3942         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3943
3944         memcpy(&val32, val, 4);
3945
3946         /* Write the data. */
3947         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3948
3949         /* Address of the NVRAM to write to. */
3950         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3951
3952         /* Issue the write command. */
3953         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3954
3955         /* Wait for completion. */
3956         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3957                 udelay(5);
3958
3959                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3960                         break;
3961         }
3962         if (j >= NVRAM_TIMEOUT_COUNT)
3963                 return -EBUSY;
3964
3965         return 0;
3966 }
3967
/* bnx2_init_nvram - detect the attached flash/EEPROM and record its
 * parameters in bp->flash_info and bp->flash_size.
 *
 * The 5709 has a fixed flash type.  On older chips the part is found by
 * matching the NVM_CFG1 strapping bits against flash_table[]; if the
 * interface has not been reconfigured yet, the matching entry's config
 * registers are programmed into the controller.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to decode. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched in either branch. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared hw config; fall back to the
	 * table's total_size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4050
4051 static int
4052 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4053                 int buf_size)
4054 {
4055         int rc = 0;
4056         u32 cmd_flags, offset32, len32, extra;
4057
4058         if (buf_size == 0)
4059                 return 0;
4060
4061         /* Request access to the flash interface. */
4062         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4063                 return rc;
4064
4065         /* Enable access to flash interface */
4066         bnx2_enable_nvram_access(bp);
4067
4068         len32 = buf_size;
4069         offset32 = offset;
4070         extra = 0;
4071
4072         cmd_flags = 0;
4073
4074         if (offset32 & 3) {
4075                 u8 buf[4];
4076                 u32 pre_len;
4077
4078                 offset32 &= ~3;
4079                 pre_len = 4 - (offset & 3);
4080
4081                 if (pre_len >= len32) {
4082                         pre_len = len32;
4083                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4084                                     BNX2_NVM_COMMAND_LAST;
4085                 }
4086                 else {
4087                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4088                 }
4089
4090                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4091
4092                 if (rc)
4093                         return rc;
4094
4095                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4096
4097                 offset32 += 4;
4098                 ret_buf += pre_len;
4099                 len32 -= pre_len;
4100         }
4101         if (len32 & 3) {
4102                 extra = 4 - (len32 & 3);
4103                 len32 = (len32 + 4) & ~3;
4104         }
4105
4106         if (len32 == 4) {
4107                 u8 buf[4];
4108
4109                 if (cmd_flags)
4110                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4111                 else
4112                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4113                                     BNX2_NVM_COMMAND_LAST;
4114
4115                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4116
4117                 memcpy(ret_buf, buf, 4 - extra);
4118         }
4119         else if (len32 > 0) {
4120                 u8 buf[4];
4121
4122                 /* Read the first word. */
4123                 if (cmd_flags)
4124                         cmd_flags = 0;
4125                 else
4126                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4127
4128                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4129
4130                 /* Advance to the next dword. */
4131                 offset32 += 4;
4132                 ret_buf += 4;
4133                 len32 -= 4;
4134
4135                 while (len32 > 4 && rc == 0) {
4136                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4137
4138                         /* Advance to the next dword. */
4139                         offset32 += 4;
4140                         ret_buf += 4;
4141                         len32 -= 4;
4142                 }
4143
4144                 if (rc)
4145                         return rc;
4146
4147                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4148                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4149
4150                 memcpy(ret_buf, buf, 4 - extra);
4151         }
4152
4153         /* Disable access to flash interface */
4154         bnx2_disable_nvram_access(bp);
4155
4156         bnx2_release_nvram_lock(bp);
4157
4158         return rc;
4159 }
4160
/* Write @buf_size bytes from @data_buf to NVRAM starting at byte offset
 * @offset.
 *
 * Handles arbitrary (non-dword-aligned) offsets and lengths by reading
 * back the leading/trailing dwords and merging the caller's data into a
 * temporary aligned buffer.  For non-buffered flash parts, each affected
 * page is read in full, erased, and rewritten (read-modify-write at page
 * granularity).  The NVRAM lock is acquired and released once per page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or an error from
 * the lower-level NVRAM helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Unaligned start: round the offset down to a dword boundary and
         * read the leading dword so bytes outside the caller's range are
         * preserved when merged below. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Unaligned end: pad the length up to a dword boundary and read
         * the trailing dword for the same merge. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        /* Build an aligned scratch copy: saved edge dwords first, then the
         * caller's payload overlaid at its true offset. */
        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        /* Non-buffered flash needs a page-sized staging buffer for the
         * read-modify-write cycle.  NOTE(review): 264 bytes is assumed to
         * be >= flash_info->page_size for all non-buffered parts in the
         * flash table -- confirm against bnx2.h flash definitions. */
        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        /* Process one flash page per iteration. */
        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        /* Restore the bytes before the caller's data. */
                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        /* Flag the final dword of the page (or, on buffered
                         * flash, the final dword of the data) as LAST. */
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
4340
4341 static void
4342 bnx2_init_fw_cap(struct bnx2 *bp)
4343 {
4344         u32 val, sig = 0;
4345
4346         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4347         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4348
4349         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4350                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4351
4352         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4353         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4354                 return;
4355
4356         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4357                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4358                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4359         }
4360
4361         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4362             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4363                 u32 link;
4364
4365                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4366
4367                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4368                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4369                         bp->phy_port = PORT_FIBRE;
4370                 else
4371                         bp->phy_port = PORT_TP;
4372
4373                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4374                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4375         }
4376
4377         if (netif_running(bp->dev) && sig)
4378                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4379 }
4380
/* Point the PCI GRC windows at the MSI-X table and PBA so the chip's
 * MSI-X structures are reachable through the register BAR.  Must be
 * called whenever the chip is reset with MSI-X in use (see
 * bnx2_reset_chip).  The write order follows the register layout:
 * enable separate windows first, then program windows 2 and 3. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4389
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA/coalescing, handshake WAIT0 with the firmware,
 * deposit a reset signature, issue the chip reset (method differs on
 * 5709), poll for completion, verify endian configuration, handshake
 * WAIT1, then re-read firmware capabilities and apply chip-rev
 * workarounds.  @reset_code is OR'd into both firmware handshakes.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV on
 * a byte-swap misconfiguration, or an error from the firmware sync /
 * rbuf workaround.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);  /* flush posted write */
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 resets through the MISC command register; the
                 * read-back flushes the write before the settle delay. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                /* Restore config-space window/swap settings lost by the
                 * reset. */
                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                /* Older chips reset via the core-reset request bit in
                 * PCICFG_MISC_CONFIG, set together with the window/swap
                 * configuration. */
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        printk(KERN_ERR PFX "Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;

        /* Re-read firmware capabilities; the remote-PHY port type may
         * have changed across the reset, in which case the default
         * remote link settings must be reapplied.  phy_lock protects
         * the phy_flags/phy_port update. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        /* The reset wiped the GRC windows; re-map the MSI-X table. */
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                bnx2_setup_msix_tbl(bp);

        return rc;
}
4495
/* Bring the chip from post-reset state to fully initialized: DMA config,
 * internal CPUs/firmware, MAC address, MTU, host coalescing, and the
 * firmware WAIT2/RESET handshake.  Called with the device quiesced,
 * after bnx2_reset_chip().  Returns 0 or a negative errno from context
 * init, CPU firmware load, or the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val, mtu;
        int rc, i;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* DMA swap modes plus read/write channel counts (bits 12-19). */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        /* NOTE(review): bit 23 is set only for 133 MHz PCI-X; meaning of
         * bits 11/20-21/23 is not named in bnx2.h -- hardware-tuning
         * values, confirm against Broadcom documentation. */
        if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0 errata: restrict TDMA to a single DMA. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, turn off relaxed ordering in the PCI-X command
         * register. */
        if (bp->flags & BNX2_FLAG_PCIX) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                rc = bnx2_init_5709_context(bp);
                if (rc)
                        return rc;
        } else
                bnx2_init_context(bp);

        /* Load firmware into the on-chip RISC processors. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

        /* Kernel-bypass block size; A0/A1 5709 need HALT_DIS (errata). */
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
                val |= BNX2_MQ_CONFIG_HALT_DIS;

        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        /* RV2P page size, encoded as log2(page) - 8 in bits 24+. */
        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the transmit backoff state from the MAC address so that
         * multiple ports do not back off in lockstep. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        mtu = bp->dev->mtu;
        val = mtu + ETH_HLEN + ETH_FCS_LEN;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        /* The rbuf thresholds are sized for at least a standard frame. */
        if (mtu < 1500)
                mtu = 1500;

        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
        bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

        /* Reset the cached status-block indices for every vector. */
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->bnx2_napi[i].last_status_idx = 0;

        bp->idle_chk_status_idx = 0xffff;

        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the host status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host coalescing parameters: each register packs the
         * interrupt-context value in the high half and the poll-context
         * value in the low half. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        if (CHIP_NUM(bp) == CHIP_NUM_5708)
                REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
        else
                REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        /* 5706 A1 errata: timer modes unusable, stats collection only. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                val = BNX2_HC_CONFIG_COLLECT_STATS;
        else {
                val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
                      BNX2_HC_CONFIG_COLLECT_STATS;
        }

        if (bp->irq_nvecs > 1) {
                REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
                       BNX2_HC_MSIX_BIT_VECTOR_VAL);

                val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
        }

        if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
                val |= BNX2_HC_CONFIG_ONE_SHOT;

        REG_WR(bp, BNX2_HC_CONFIG, val);

        /* Per-vector coalescing for the additional MSI-X status blocks
         * (vector 0 was configured above). */
        for (i = 1; i < bp->irq_nvecs; i++) {
                u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
                           BNX2_HC_SB_CONFIG_1;

                REG_WR(bp, base,
                        BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
                        BNX2_HC_SB_CONFIG_1_ONE_SHOT);

                REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
                        (bp->tx_quick_cons_trip_int << 16) |
                         bp->tx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
                        (bp->tx_ticks_int << 16) | bp->tx_ticks);

                REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
                       (bp->rx_quick_cons_trip_int << 16) |
                        bp->rx_quick_cons_trip);

                REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
                        (bp->rx_ticks_int << 16) | bp->rx_ticks);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
        }
        /* Final handshake: tell firmware initialization is done. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          1, 0);

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);  /* flush posted write */

        udelay(20);

        /* Cache the host-coalescing command word for later trigger use. */
        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
4716
4717 static void
4718 bnx2_clear_ring_states(struct bnx2 *bp)
4719 {
4720         struct bnx2_napi *bnapi;
4721         struct bnx2_tx_ring_info *txr;
4722         struct bnx2_rx_ring_info *rxr;
4723         int i;
4724
4725         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4726                 bnapi = &bp->bnx2_napi[i];
4727                 txr = &bnapi->tx_ring;
4728                 rxr = &bnapi->rx_ring;
4729
4730                 txr->tx_cons = 0;
4731                 txr->hw_tx_cons = 0;
4732                 rxr->rx_prod_bseq = 0;
4733                 rxr->rx_prod = 0;
4734                 rxr->rx_cons = 0;
4735                 rxr->rx_pg_prod = 0;
4736                 rxr->rx_pg_cons = 0;
4737         }
4738 }
4739
4740 static void
4741 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4742 {
4743         u32 val, offset0, offset1, offset2, offset3;
4744         u32 cid_addr = GET_CID_ADDR(cid);
4745
4746         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4747                 offset0 = BNX2_L2CTX_TYPE_XI;
4748                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4749                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4750                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4751         } else {
4752                 offset0 = BNX2_L2CTX_TYPE;
4753                 offset1 = BNX2_L2CTX_CMD_TYPE;
4754                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4755                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4756         }
4757         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4758         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4759
4760         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4761         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4762
4763         val = (u64) txr->tx_desc_mapping >> 32;
4764         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4765
4766         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4767         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4768 }
4769
4770 static void
4771 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4772 {
4773         struct tx_bd *txbd;
4774         u32 cid = TX_CID;
4775         struct bnx2_napi *bnapi;
4776         struct bnx2_tx_ring_info *txr;
4777
4778         bnapi = &bp->bnx2_napi[ring_num];
4779         txr = &bnapi->tx_ring;
4780
4781         if (ring_num == 0)
4782                 cid = TX_CID;
4783         else
4784                 cid = TX_TSS_CID + ring_num - 1;
4785
4786         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4787
4788         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4789
4790         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4791         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4792
4793         txr->tx_prod = 0;
4794         txr->tx_prod_bseq = 0;
4795
4796         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4797         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4798
4799         bnx2_init_tx_context(bp, cid, txr);
4800 }
4801
4802 static void
4803 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4804                      int num_rings)
4805 {
4806         int i;
4807         struct rx_bd *rxbd;
4808
4809         for (i = 0; i < num_rings; i++) {
4810                 int j;
4811
4812                 rxbd = &rx_ring[i][0];
4813                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4814                         rxbd->rx_bd_len = buf_size;
4815                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4816                 }
4817                 if (i == (num_rings - 1))
4818                         j = 0;
4819                 else
4820                         j = i + 1;
4821                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4822                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4823         }
4824 }
4825
/* Initialize rx ring @ring_num: build the BD chains (normal and, when
 * jumbo page buffers are in use, the page ring), program the chip
 * context, pre-fill the rings with buffers, and write the initial
 * producer indices to the ring doorbells. */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
        int i;
        u16 prod, ring_prod;
        u32 cid, rx_cid_addr, val;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        /* Ring 0 uses the base rx CID; additional RSS rings follow it. */
        if (ring_num == 0)
                cid = RX_CID;
        else
                cid = RX_RSS_CID + ring_num - 1;

        rx_cid_addr = GET_CID_ADDR(cid);

        /* Chain the normal rx BD pages into a ring. */
        bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        bnx2_init_rx_context(bp, cid);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
                REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
        }

        /* Default: no page ring (overwritten below if jumbo is active). */
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                /* Jumbo support: build and advertise the page BD ring. */
                bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
                                     rxr->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

                val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* DMA address of the first normal rx BD page. */
        val = (u64) rxr->rx_desc_mapping[0] >> 32;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the page ring; stop early if allocation fails. */
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        rxr->rx_pg_prod = prod;

        /* Pre-fill the normal ring with skbs, likewise best-effort. */
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        rxr->rx_prod = prod;

        /* Cache the mailbox doorbell addresses for the fast path. */
        rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
        rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
        rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

        /* Publish the initial producer indices to the chip. */
        REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
        REG_WR16(bp, rxr->rx_bidx_addr, prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4905
/* Initialize every tx and rx ring and, when multiple rings are active,
 * program the TSS configuration and the RSS indirection table. */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
        int i;
        u32 val;

        bnx2_clear_ring_states(bp);

        /* Disable TSS while the tx rings are (re)built. */
        REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
        for (i = 0; i < bp->num_tx_rings; i++)
                bnx2_init_tx_ring(bp, i);

        if (bp->num_tx_rings > 1)
                REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
                       (TX_TSS_CID << 7));

        /* Disable RSS while the rx rings are (re)built. */
        REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
        bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

        for (i = 0; i < bp->num_rx_rings; i++)
                bnx2_init_rx_ring(bp, i);

        if (bp->num_rx_rings > 1) {
                u32 tbl_32;
                u8 *tbl = (u8 *) &tbl_32;

                bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
                                BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

                /* Build the indirection table four bytes at a time: each
                 * entry cycles over the non-default rx rings, and each
                 * packed dword is flushed to scratch memory big-endian.
                 * NOTE(review): tbl writes bytes in host order and then
                 * cpu_to_be32() swaps the whole word on little-endian --
                 * the resulting byte order is what the RXP firmware
                 * expects; confirm against the firmware interface. */
                for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
                        tbl[i % 4] = i % (bp->num_rx_rings - 1);
                        if ((i % 4) == 3)
                                bnx2_reg_wr_ind(bp,
                                                BNX2_RXP_SCRATCH_RSS_TBL + i,
                                                cpu_to_be32(tbl_32));
                }

                val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
                      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

                REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

        }
}
4950
4951 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4952 {
4953         u32 max, num_rings = 1;
4954
4955         while (ring_size > MAX_RX_DESC_CNT) {
4956                 ring_size -= MAX_RX_DESC_CNT;
4957                 num_rings++;
4958         }
4959         /* round to next power of 2 */
4960         max = max_size;
4961         while ((max & num_rings) == 0)
4962                 max >>= 1;
4963
4964         if (num_rings != max)
4965                 max <<= 1;
4966
4967         return max;
4968 }
4969
/* Compute all RX buffer/ring geometry for the requested ring size.
 *
 * For frames whose skb footprint would exceed one page (and the chip's
 * jumbo paging is usable), the driver switches to a split model: a
 * small header skb plus payload placed on the RX page ring, with the
 * copybreak threshold disabled.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Full skb footprint: aligned data area plus shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per packet; mtu - 40 presumably discounts
		 * the part carried in the header skb -- TODO confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Header-only skb in split mode; copybreak disabled. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5008
5009 static void
5010 bnx2_free_tx_skbs(struct bnx2 *bp)
5011 {
5012         int i;
5013
5014         for (i = 0; i < bp->num_tx_rings; i++) {
5015                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5016                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5017                 int j;
5018
5019                 if (txr->tx_buf_ring == NULL)
5020                         continue;
5021
5022                 for (j = 0; j < TX_DESC_CNT; ) {
5023                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5024                         struct sk_buff *skb = tx_buf->skb;
5025
5026                         if (skb == NULL) {
5027                                 j++;
5028                                 continue;
5029                         }
5030
5031                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5032
5033                         tx_buf->skb = NULL;
5034
5035                         j += skb_shinfo(skb)->nr_frags + 1;
5036                         dev_kfree_skb(skb);
5037                 }
5038         }
5039 }
5040
5041 static void
5042 bnx2_free_rx_skbs(struct bnx2 *bp)
5043 {
5044         int i;
5045
5046         for (i = 0; i < bp->num_rx_rings; i++) {
5047                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5048                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5049                 int j;
5050
5051                 if (rxr->rx_buf_ring == NULL)
5052                         return;
5053
5054                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5055                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5056                         struct sk_buff *skb = rx_buf->skb;
5057
5058                         if (skb == NULL)
5059                                 continue;
5060
5061                         pci_unmap_single(bp->pdev,
5062                                          pci_unmap_addr(rx_buf, mapping),
5063                                          bp->rx_buf_use_size,
5064                                          PCI_DMA_FROMDEVICE);
5065
5066                         rx_buf->skb = NULL;
5067
5068                         dev_kfree_skb(skb);
5069                 }
5070                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5071                         bnx2_free_rx_page(bp, rxr, j);
5072         }
5073 }
5074
/* Free all driver-owned TX and RX buffers; called around chip resets. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5081
5082 static int
5083 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5084 {
5085         int rc;
5086
5087         rc = bnx2_reset_chip(bp, reset_code);
5088         bnx2_free_skbs(bp);
5089         if (rc)
5090                 return rc;
5091
5092         if ((rc = bnx2_init_chip(bp)) != 0)
5093                 return rc;
5094
5095         bnx2_init_all_rings(bp);
5096         return 0;
5097 }
5098
/* Full NIC (re)initialization: reset and re-init the chip, then bring
 * up the PHY and link state under phy_lock.  reset_phy is forwarded to
 * bnx2_init_phy().  Returns 0 or the error from the reset step.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* Firmware-managed PHYs report link changes via events; pick up
	 * any state recorded while the chip was being reset.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5115
5116 static int
5117 bnx2_shutdown_chip(struct bnx2 *bp)
5118 {
5119         u32 reset_code;
5120
5121         if (bp->flags & BNX2_FLAG_NO_WOL)
5122                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5123         else if (bp->wol)
5124                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5125         else
5126                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5127
5128         return bnx2_reset_chip(bp, reset_code);
5129 }
5130
/* Ethtool register self-test.
 *
 * For each table entry: bits in rw_mask must read back exactly as
 * written (first all-zeros, then all-ones), and bits in ro_mask must
 * retain their saved value across both writes.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  The original register
 * value is restored whether the entry passes or fails.  Returns 0 on
 * success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Pass 1: write zeros; rw bits must clear, ro bits hold. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Pass 2: write ones; rw bits must set, ro bits hold. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original register value on success. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore on failure too, then report the bad register. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5301
5302 static int
5303 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5304 {
5305         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5306                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5307         int i;
5308
5309         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5310                 u32 offset;
5311
5312                 for (offset = 0; offset < size; offset += 4) {
5313
5314                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5315
5316                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5317                                 test_pattern[i]) {
5318                                 return -ENODEV;
5319                         }
5320                 }
5321         }
5322         return 0;
5323 }
5324
5325 static int
5326 bnx2_test_memory(struct bnx2 *bp)
5327 {
5328         int ret = 0;
5329         int i;
5330         static struct mem_entry {
5331                 u32   offset;
5332                 u32   len;
5333         } mem_tbl_5706[] = {
5334                 { 0x60000,  0x4000 },
5335                 { 0xa0000,  0x3000 },
5336                 { 0xe0000,  0x4000 },
5337                 { 0x120000, 0x4000 },
5338                 { 0x1a0000, 0x4000 },
5339                 { 0x160000, 0x4000 },
5340                 { 0xffffffff, 0    },
5341         },
5342         mem_tbl_5709[] = {
5343                 { 0x60000,  0x4000 },
5344                 { 0xa0000,  0x3000 },
5345                 { 0xe0000,  0x4000 },
5346                 { 0x120000, 0x4000 },
5347                 { 0x1a0000, 0x4000 },
5348                 { 0xffffffff, 0    },
5349         };
5350         struct mem_entry *mem_tbl;
5351
5352         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5353                 mem_tbl = mem_tbl_5709;
5354         else
5355                 mem_tbl = mem_tbl_5706;
5356
5357         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5358                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5359                         mem_tbl[i].len)) != 0) {
5360                         return ret;
5361                 }
5362         }
5363
5364         return ret;
5365 }
5366
5367 #define BNX2_MAC_LOOPBACK       0
5368 #define BNX2_PHY_LOOPBACK       1
5369
/* Send one self-addressed test frame through the selected internal
 * loopback (BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK) and verify it is
 * received intact on ring 0.  Called from the ethtool self-test path
 * after a chip reset (see bnx2_test_loopback()).
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * skb allocation/DMA-mapping failure, -ENODEV on any TX/RX mismatch.
 * PHY loopback is skipped (returns 0) on remote-PHY devices.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	/* NOTE(review): txr and rxr are initialized here and then
	 * re-assigned to the same values just below -- the initializers
	 * are redundant.
	 */
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits in a single RX buffer. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	/* Frame: own MAC as destination, zeroed source/type, then a
	 * recognizable byte pattern in the payload.
	 */
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_maps[0];

	/* Force a coalesce so the RX consumer index below is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single-fragment TX descriptor and ring the doorbell. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so the TX/RX indices reflect the looped frame. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The L2 frame header precedes the packet data in the buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames with any hardware-reported receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check (minus 4-byte CRC), then byte-for-byte payload
	 * comparison against the transmitted pattern.
	 */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5500
5501 #define BNX2_MAC_LOOPBACK_FAILED        1
5502 #define BNX2_PHY_LOOPBACK_FAILED        2
5503 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5504                                          BNX2_PHY_LOOPBACK_FAILED)
5505
5506 static int
5507 bnx2_test_loopback(struct bnx2 *bp)
5508 {
5509         int rc = 0;
5510
5511         if (!netif_running(bp->dev))
5512                 return BNX2_LOOPBACK_FAILED;
5513
5514         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5515         spin_lock_bh(&bp->phy_lock);
5516         bnx2_init_phy(bp, 1);
5517         spin_unlock_bh(&bp->phy_lock);
5518         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5519                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5520         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5521                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5522         return rc;
5523 }
5524
5525 #define NVRAM_SIZE 0x200
5526 #define CRC32_RESIDUAL 0xdebb20e3
5527
5528 static int
5529 bnx2_test_nvram(struct bnx2 *bp)
5530 {
5531         __be32 buf[NVRAM_SIZE / 4];
5532         u8 *data = (u8 *) buf;
5533         int rc = 0;
5534         u32 magic, csum;
5535
5536         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5537                 goto test_nvram_done;
5538
5539         magic = be32_to_cpu(buf[0]);
5540         if (magic != 0x669955aa) {
5541                 rc = -ENODEV;
5542                 goto test_nvram_done;
5543         }
5544
5545         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5546                 goto test_nvram_done;
5547
5548         csum = ether_crc_le(0x100, data);
5549         if (csum != CRC32_RESIDUAL) {
5550                 rc = -ENODEV;
5551                 goto test_nvram_done;
5552         }
5553
5554         csum = ether_crc_le(0x100, data + 0x100);
5555         if (csum != CRC32_RESIDUAL) {
5556                 rc = -ENODEV;
5557         }
5558
5559 test_nvram_done:
5560         return rc;
5561 }
5562
/* Ethtool link self-test: 0 when link is up, -ENODEV otherwise.
 * For remote-PHY (firmware-managed) devices the cached bp->link_up is
 * authoritative; otherwise BMSR is read directly from the PHY.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read twice: BMSR link status is latched, so the second read
	 * reflects the current state -- presumably; TODO confirm.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5588
5589 static int
5590 bnx2_test_intr(struct bnx2 *bp)
5591 {
5592         int i;
5593         u16 status_idx;
5594
5595         if (!netif_running(bp->dev))
5596                 return -ENODEV;
5597
5598         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5599
5600         /* This register is not touched during run-time. */
5601         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5602         REG_RD(bp, BNX2_HC_COMMAND);
5603
5604         for (i = 0; i < 10; i++) {
5605                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5606                         status_idx) {
5607
5608                         break;
5609                 }
5610
5611                 msleep_interruptible(10);
5612         }
5613         if (i < 10)
5614                 return 0;
5615
5616         return -ENODEV;
5617 }
5618
/* Determining link for parallel detection. */
/* Returns 1 when the 5706S SerDes appears to have a usable link from a
 * non-autoneg partner: signal detect is set, the AN debug register is
 * clean (no NOSYNC, no invalid RUDI), and no CONFIG code words are
 * being received.  Returns 0 otherwise, or immediately when parallel
 * detection is disabled.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Registers below are read twice; presumably the first read
	 * clears latched bits so the second shows current state --
	 * TODO confirm against the PHY datasheet.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5650
/* 5706S SerDes state machine, run from bnx2_timer() under phy_lock.
 *
 * While autoneg is enabled but the link is down, falls back to forced
 * 1G full duplex when parallel detection reports a partner.  When the
 * link is up in parallel-detect mode, re-enables autoneg if the
 * partner starts advertising it.  Independently, link state is
 * re-checked and force-dropped on loss of sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently restarted; give it time to settle. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner without autoneg detected: force 1G FD. */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Shadow register access; bit 0x20 presumably indicates
		 * the partner now advertises autoneg -- TODO confirm.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN debug (latched bits; see
		 * bnx2_5706_serdes_has_link()).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link claims up but sync lost: force it down once,
			 * then let bnx2_set_link() re-evaluate.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5712
/* 5708S SerDes state machine, run from bnx2_timer().
 *
 * Only relevant for 2.5G-capable PHYs that are not firmware-managed:
 * while autoneg makes no progress, toggles between forced 2.5G (with a
 * short retry interval) and autoneg to find a link partner.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Recently toggled; wait before the next attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
5745
/* Periodic driver timer (rescheduled every bp->current_interval
 * jiffies).  Sends the firmware heartbeat, refreshes the firmware RX
 * drop counter and runs the per-chip SerDes state machines.  The work
 * is skipped (but the timer still rescheduled) while interrupts are
 * disabled via bp->intr_sem.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* For MSI without one-shot mode, check for a missed interrupt. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5781
5782 static int
5783 bnx2_request_irq(struct bnx2 *bp)
5784 {
5785         unsigned long flags;
5786         struct bnx2_irq *irq;
5787         int rc = 0, i;
5788
5789         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5790                 flags = 0;
5791         else
5792                 flags = IRQF_SHARED;
5793
5794         for (i = 0; i < bp->irq_nvecs; i++) {
5795                 irq = &bp->irq_tbl[i];
5796                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5797                                  &bp->bnx2_napi[i]);
5798                 if (rc)
5799                         break;
5800                 irq->requested = 1;
5801         }
5802         return rc;
5803 }
5804
/* Release every requested interrupt vector, disable MSI/MSI-X on the
 * device and clear the corresponding interrupt-mode flags, returning
 * the device to legacy INTx configuration.
 */
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
5824
/* Try to switch the device to MSI-X with msix_vecs vectors.  On any
 * failure this returns silently and the caller falls back to MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the chip's MSI-X table/PBA windows before asking the
	 * PCI core to enable the vectors.
	 */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;

		/* Per-vector names, e.g. "eth0-0", "eth0-1", ... */
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}

	/* All-or-nothing: the full hardware vector count is requested even
	 * though only msix_vecs of them will be used.
	 */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5855
/* Choose the interrupt mode (MSI-X, MSI, or legacy INTx) and fill in
 * bp->irq_tbl and the ring counts accordingly.  dis_msi forces INTx.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default: a single legacy INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	/* MSI-X is only attempted on multi-CPU systems. */
	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 gets the one-shot MSI handler. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Tx ring count is rounded down to a power of two for the queue
	 * mapping; rx rings use every available vector.
	 */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
5889
/* Called with rtnl_lock */
/* net_device open hook: power up, pick an interrupt mode, allocate
 * rings/IRQs, bring up the chip, and (for MSI) verify that interrupts
 * actually arrive, falling back to INTx if they do not.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	/* Interrupt mode must be chosen before per-vector allocations. */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force legacy INTx (dis_msi = 1) and redo chip
			 * init with the new vector layout.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind; bnx2_free_irq() also disables MSI/MSI-X and it is safe
	 * to call these on partially set-up state.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
5966
/* Work-queue handler (scheduled from bnx2_tx_timeout()): quiesce the
 * interface, reinitialize the chip, and restart.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The device may have been closed between scheduling and now. */
	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	/* Raise intr_sem before restarting; presumably interrupt handling
	 * stays gated until bnx2_netif_start() releases it - the handler
	 * side is not visible here, TODO confirm.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
5982
/* net_device tx_timeout hook: defer the chip reset to process context
 * via the reset work item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5991
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * chip's rx filtering to match, with traffic quiesced around the change.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	/* Tell the firmware whether VLAN tags should be kept in rx
	 * packets when the chip supports it.
	 */
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6009
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* hard_start_xmit hook: map the skb for DMA, build one tx BD for the
 * linear part plus one per fragment, and ring the doorbell.  Returns
 * NETDEV_TX_OK (also when the packet is dropped on a DMA mapping
 * failure) or NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Need one BD per fragment plus one for the linear data; the
	 * queue should have been stopped before it got this full.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	/* VLAN tag rides in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	/* LSO (TSO) setup: encode MSS and TCP/IP option lengths into the
	 * BD flag and mss fields.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* Offset of the TCP header beyond the basic IPv6
			 * header, i.e. the extension-header length.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Non-zero offset is split (in 8-byte
				 * units) across several BD bit fields.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part and all frags in one call; on failure the
	 * packet is dropped and NETDEV_TX_OK returned per convention.
	 */
	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_maps[0];

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	/* First BD: linear data, marked START. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i + 1];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index then byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue if a max-fragment packet might not fit, then
	 * re-check and wake in case bnx2_tx_int() freed space meanwhile.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
6149
/* Called with rtnl_lock */
/* net_device stop hook: quiesce interrupts, NAPI, and the timer, shut
 * the chip down, release IRQs and memory, and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no reset work runs concurrently with teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Power the chip down until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6170
/* Read a 64-bit hardware statistics counter stored as _hi/_lo 32-bit
 * halves.  On 64-bit hosts the halves are combined; on 32-bit hosts
 * only the low half is reported (an unsigned long cannot hold more).
 *
 * The expansions are fully parenthesized: the previous GET_NET_STATS64
 * ended in an unparenthesized `+`, so using it next to a higher-
 * precedence operator (e.g. `2 * GET_NET_STATS64(x)`) silently bound
 * only the high half to the multiplication.
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6183
/* net_device get_stats hook: translate the chip's DMA statistics block
 * into struct net_device_stats.  Returns the (possibly stale) stats
 * unchanged if the block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	/* No statistics block before chip initialization. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are suppressed on 5706 and 5708 A0 -
	 * presumably a hardware erratum on those chips, TODO confirm.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Includes firmware-level drops collected by the timer. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6259
6260 /* All ethtool functions called with rtnl_lock */
6261
/* ethtool get_settings hook: report supported media, advertising mask,
 * and the current link speed/duplex (unknown when the link is down).
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* Remote-PHY capable firmware can drive both media types;
	 * otherwise the fitted port type decides.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link-state fields sampled below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* Link down: speed and duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6320
/* ethtool set_settings hook: validate the requested port, autoneg, and
 * speed/duplex combination, then commit it to bp and (if the device is
 * running) reprogram the PHY.  Returns -EINVAL for any unsupported
 * combination, or the result of bnx2_setup_phy().
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is only updated after all checks pass. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching port types requires remote-PHY capable firmware. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable PHY and a fibre port. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			/* 1000 half duplex is not supported. */
			goto err_out_unlock;
		else {
			/* Zero or multiple speeds requested: advertise
			 * everything valid for the selected port type.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			/* Forced gigabit speeds are fibre-only. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit the new configuration. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6415
6416 static void
6417 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6418 {
6419         struct bnx2 *bp = netdev_priv(dev);
6420
6421         strcpy(info->driver, DRV_MODULE_NAME);
6422         strcpy(info->version, DRV_MODULE_VERSION);
6423         strcpy(info->bus_info, pci_name(bp->pdev));
6424         strcpy(info->fw_version, bp->fw_version);
6425 }
6426
/* Size of the ethtool register dump buffer. */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: report the fixed dump size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6434
/* ethtool get_regs hook: dump the chip's readable register ranges into
 * the caller's BNX2_REGDUMP_LEN buffer, leaving unreadable gaps zeroed.
 * The dump stays all-zero if the device is down (chip unpowered).
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of (start, end-exclusive) byte offsets delimiting the
	 * readable register ranges; the final 0x8000 terminates the walk
	 * since it is >= BNX2_REGDUMP_LEN.
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is powered up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* NOTE(review): u32-pointer arithmetic here only works because
	 * reg_boundaries[0] == 0; a nonzero first offset would be scaled
	 * by sizeof(u32).
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of a readable range: jump to the next range start
		 * and reposition the output pointer to match.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6484
6485 static void
6486 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6487 {
6488         struct bnx2 *bp = netdev_priv(dev);
6489
6490         if (bp->flags & BNX2_FLAG_NO_WOL) {
6491                 wol->supported = 0;
6492                 wol->wolopts = 0;
6493         }
6494         else {
6495                 wol->supported = WAKE_MAGIC;
6496                 if (bp->wol)
6497                         wol->wolopts = WAKE_MAGIC;
6498                 else
6499                         wol->wolopts = 0;
6500         }
6501         memset(&wol->sopass, 0, sizeof(wol->sopass));
6502 }
6503
6504 static int
6505 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6506 {
6507         struct bnx2 *bp = netdev_priv(dev);
6508
6509         if (wol->wolopts & ~WAKE_MAGIC)
6510                 return -EINVAL;
6511
6512         if (wol->wolopts & WAKE_MAGIC) {
6513                 if (bp->flags & BNX2_FLAG_NO_WOL)
6514                         return -EINVAL;
6515
6516                 bp->wol = 1;
6517         }
6518         else {
6519                 bp->wol = 0;
6520         }
6521         return 0;
6522 }
6523
/* ethtool nway_reset hook: restart autonegotiation.  Only valid while
 * the device is running with autoneg enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting autoneg only makes sense when it is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY firmware owns the PHY; ask it to renegotiate. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping so other PHY users are
		 * not blocked for the 20ms link-down window.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout in the driver timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6569
6570 static int
6571 bnx2_get_eeprom_len(struct net_device *dev)
6572 {
6573         struct bnx2 *bp = netdev_priv(dev);
6574
6575         if (bp->flash_info == NULL)
6576                 return 0;
6577
6578         return (int) bp->flash_size;
6579 }
6580
6581 static int
6582 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6583                 u8 *eebuf)
6584 {
6585         struct bnx2 *bp = netdev_priv(dev);
6586         int rc;
6587
6588         if (!netif_running(dev))
6589                 return -EAGAIN;
6590
6591         /* parameters already validated in ethtool_get_eeprom */
6592
6593         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6594
6595         return rc;
6596 }
6597
6598 static int
6599 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6600                 u8 *eebuf)
6601 {
6602         struct bnx2 *bp = netdev_priv(dev);
6603         int rc;
6604
6605         if (!netif_running(dev))
6606                 return -EAGAIN;
6607
6608         /* parameters already validated in ethtool_set_eeprom */
6609
6610         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6611
6612         return rc;
6613 }
6614
6615 static int
6616 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6617 {
6618         struct bnx2 *bp = netdev_priv(dev);
6619
6620         memset(coal, 0, sizeof(struct ethtool_coalesce));
6621
6622         coal->rx_coalesce_usecs = bp->rx_ticks;
6623         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6624         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6625         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6626
6627         coal->tx_coalesce_usecs = bp->tx_ticks;
6628         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6629         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6630         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6631
6632         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6633
6634         return 0;
6635 }
6636
/* ethtool set_coalesce hook: store the requested coalescing values,
 * clamped to what the chip fields can hold, then restart the chip (if
 * running) so they take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Tick values are clamped to 0x3ff and frame counts to 0xff. */
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* On the 5708, any nonzero stats period is forced to 1 second -
	 * presumably a chip limitation, TODO confirm.
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart the chip so the new values are programmed. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6685
6686 static void
6687 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6688 {
6689         struct bnx2 *bp = netdev_priv(dev);
6690
6691         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6692         ering->rx_mini_max_pending = 0;
6693         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6694
6695         ering->rx_pending = bp->rx_ring_size;
6696         ering->rx_mini_pending = 0;
6697         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6698
6699         ering->tx_max_pending = MAX_TX_DESC_CNT;
6700         ering->tx_pending = bp->tx_ring_size;
6701 }
6702
/* Resize the RX and TX rings to the requested descriptor counts.
 *
 * If the interface is running, the chip is reset and all buffers and
 * ring memory are released before the new sizes are recorded; memory
 * is then reallocated and the NIC reinitialized and restarted.
 *
 * NOTE(review): if bnx2_alloc_mem() fails here, we return with the
 * device quiesced and its memory freed while the netdev is still
 * nominally up — confirm callers tolerate that state.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Quiesce the device and drop all buffers before resizing. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6727
6728 static int
6729 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6730 {
6731         struct bnx2 *bp = netdev_priv(dev);
6732         int rc;
6733
6734         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6735                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6736                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6737
6738                 return -EINVAL;
6739         }
6740         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6741         return rc;
6742 }
6743
6744 static void
6745 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6746 {
6747         struct bnx2 *bp = netdev_priv(dev);
6748
6749         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6750         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6751         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6752 }
6753
6754 static int
6755 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6756 {
6757         struct bnx2 *bp = netdev_priv(dev);
6758
6759         bp->req_flow_ctrl = 0;
6760         if (epause->rx_pause)
6761                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6762         if (epause->tx_pause)
6763                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6764
6765         if (epause->autoneg) {
6766                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6767         }
6768         else {
6769                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6770         }
6771
6772         if (netif_running(dev)) {
6773                 spin_lock_bh(&bp->phy_lock);
6774                 bnx2_setup_phy(bp, bp->phy_port);
6775                 spin_unlock_bh(&bp->phy_lock);
6776         }
6777
6778         return 0;
6779 }
6780
6781 static u32
6782 bnx2_get_rx_csum(struct net_device *dev)
6783 {
6784         struct bnx2 *bp = netdev_priv(dev);
6785
6786         return bp->rx_csum;
6787 }
6788
6789 static int
6790 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6791 {
6792         struct bnx2 *bp = netdev_priv(dev);
6793
6794         bp->rx_csum = data;
6795         return 0;
6796 }
6797
6798 static int
6799 bnx2_set_tso(struct net_device *dev, u32 data)
6800 {
6801         struct bnx2 *bp = netdev_priv(dev);
6802
6803         if (data) {
6804                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6805                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6806                         dev->features |= NETIF_F_TSO6;
6807         } else
6808                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6809                                    NETIF_F_TSO_ECN);
6810         return 0;
6811 }
6812
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  The order here must match
 * bnx2_stats_offset_arr and the bnx2_*_stats_len_arr tables below —
 * all are indexed in lockstep by bnx2_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6865
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offset of each counter within struct statistics_block,
 * in the same order as bnx2_stats_str_arr.  64-bit counters point at
 * their _hi word; bnx2_get_ethtool_stats() reads the _lo word at the
 * next offset.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6916
/* Per-counter widths in bytes (8 = 64-bit, 4 = 32-bit, 0 = counter
 * skipped on that chip), indexed like bnx2_stats_offset_arr and
 * consumed by bnx2_get_ethtool_stats().
 *
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6935
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; indexed to match the buf[] result slots
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6948
6949 static int
6950 bnx2_get_sset_count(struct net_device *dev, int sset)
6951 {
6952         switch (sset) {
6953         case ETH_SS_TEST:
6954                 return BNX2_NUM_TESTS;
6955         case ETH_SS_STATS:
6956                 return BNX2_NUM_STATS;
6957         default:
6958                 return -EOPNOTSUPP;
6959         }
6960 }
6961
/* ethtool self-test handler.  Runs the register, memory and loopback
 * tests when offline testing is requested (these require a diagnostic
 * chip reset), then the NVRAM, interrupt and link tests.  buf[0..5]
 * holds one result per test in bnx2_tests_str_arr order; nonzero
 * means failure, and ETH_TEST_FL_FAILED is set on any failure.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access to the chip. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diagnostic reset. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* If the interface was down, drop back to low power. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7020
7021 static void
7022 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7023 {
7024         switch (stringset) {
7025         case ETH_SS_STATS:
7026                 memcpy(buf, bnx2_stats_str_arr,
7027                         sizeof(bnx2_stats_str_arr));
7028                 break;
7029         case ETH_SS_TEST:
7030                 memcpy(buf, bnx2_tests_str_arr,
7031                         sizeof(bnx2_tests_str_arr));
7032                 break;
7033         }
7034 }
7035
7036 static void
7037 bnx2_get_ethtool_stats(struct net_device *dev,
7038                 struct ethtool_stats *stats, u64 *buf)
7039 {
7040         struct bnx2 *bp = netdev_priv(dev);
7041         int i;
7042         u32 *hw_stats = (u32 *) bp->stats_blk;
7043         u8 *stats_len_arr = NULL;
7044
7045         if (hw_stats == NULL) {
7046                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7047                 return;
7048         }
7049
7050         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7051             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7052             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7053             (CHIP_ID(bp) == CHIP_ID_5708_A0))
7054                 stats_len_arr = bnx2_5706_stats_len_arr;
7055         else
7056                 stats_len_arr = bnx2_5708_stats_len_arr;
7057
7058         for (i = 0; i < BNX2_NUM_STATS; i++) {
7059                 if (stats_len_arr[i] == 0) {
7060                         /* skip this counter */
7061                         buf[i] = 0;
7062                         continue;
7063                 }
7064                 if (stats_len_arr[i] == 4) {
7065                         /* 4-byte counter */
7066                         buf[i] = (u64)
7067                                 *(hw_stats + bnx2_stats_offset_arr[i]);
7068                         continue;
7069                 }
7070                 /* 8-byte counter */
7071                 buf[i] = (((u64) *(hw_stats +
7072                                         bnx2_stats_offset_arr[i])) << 32) +
7073                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7074         }
7075 }
7076
/* ethtool phys_id handler: blink the port LED `data` times (default 2)
 * at roughly 1 Hz so the physical port can be located, then restore
 * the saved LED configuration.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	/* Take manual control of the LEDs; `save` is restored below. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			/* Even iterations: override with all LED bits clear. */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			/* Odd iterations: override with all LED bits set. */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Let the user abort the blink sequence with a signal. */
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7116
7117 static int
7118 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7119 {
7120         struct bnx2 *bp = netdev_priv(dev);
7121
7122         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7123                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7124         else
7125                 return (ethtool_op_set_tx_csum(dev, data));
7126 }
7127
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7158
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY registers are accessed under phy_lock.  Access is refused when
 * the PHY is remotely managed (BNX2_PHY_FLAG_REMOTE_PHY_CAP) or the
 * interface is down, and register writes require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7212
7213 /* Called with rtnl_lock */
7214 static int
7215 bnx2_change_mac_addr(struct net_device *dev, void *p)
7216 {
7217         struct sockaddr *addr = p;
7218         struct bnx2 *bp = netdev_priv(dev);
7219
7220         if (!is_valid_ether_addr(addr->sa_data))
7221                 return -EINVAL;
7222
7223         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7224         if (netif_running(dev))
7225                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7226
7227         return 0;
7228 }
7229
7230 /* Called with rtnl_lock */
7231 static int
7232 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7233 {
7234         struct bnx2 *bp = netdev_priv(dev);
7235
7236         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7237                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7238                 return -EINVAL;
7239
7240         dev->mtu = new_mtu;
7241         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7242 }
7243
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: run the interrupt handler for every IRQ vector with
 * that vector disabled, so the device can be serviced when normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
7258
/* Determine whether a 5709 port uses a SerDes PHY and set
 * BNX2_PHY_FLAG_SERDES accordingly.
 *
 * The bond ID field identifies single-media parts directly; for
 * dual-media parts the media is decoded from the PHY strap value
 * (taken from the override field when the strap-override bit is set)
 * combined with the PCI function number.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;		/* _C bond: SERDES flag stays clear */
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values that indicate SerDes differ between PCI
	 * function 0 and the other functions.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7296
/* Detect the bus type (PCI vs. PCI-X), clock speed and data width the
 * chip sits on, recording the results in bp->flags and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock field to a nominal MHz value. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only the M66EN pin distinguishes
		 * 66 MHz from 33 MHz operation.
		 */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7348
7349 static int __devinit
7350 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7351 {
7352         struct bnx2 *bp;
7353         unsigned long mem_len;
7354         int rc, i, j;
7355         u32 reg;
7356         u64 dma_mask, persist_dma_mask;
7357
7358         SET_NETDEV_DEV(dev, &pdev->dev);
7359         bp = netdev_priv(dev);
7360
7361         bp->flags = 0;
7362         bp->phy_flags = 0;
7363
7364         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7365         rc = pci_enable_device(pdev);
7366         if (rc) {
7367                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7368                 goto err_out;
7369         }
7370
7371         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7372                 dev_err(&pdev->dev,
7373                         "Cannot find PCI device base address, aborting.\n");
7374                 rc = -ENODEV;
7375                 goto err_out_disable;
7376         }
7377
7378         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7379         if (rc) {
7380                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7381                 goto err_out_disable;
7382         }
7383
7384         pci_set_master(pdev);
7385         pci_save_state(pdev);
7386
7387         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7388         if (bp->pm_cap == 0) {
7389                 dev_err(&pdev->dev,
7390                         "Cannot find power management capability, aborting.\n");
7391                 rc = -EIO;
7392                 goto err_out_release;
7393         }
7394
7395         bp->dev = dev;
7396         bp->pdev = pdev;
7397
7398         spin_lock_init(&bp->phy_lock);
7399         spin_lock_init(&bp->indirect_lock);
7400         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7401
7402         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7403         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
7404         dev->mem_end = dev->mem_start + mem_len;
7405         dev->irq = pdev->irq;
7406
7407         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7408
7409         if (!bp->regview) {
7410                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7411                 rc = -ENOMEM;
7412                 goto err_out_release;
7413         }
7414
7415         /* Configure byte swap and enable write to the reg_window registers.
7416          * Rely on CPU to do target byte swapping on big endian systems
7417          * The chip's target access swapping will not swap all accesses
7418          */
7419         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7420                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7421                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7422
7423         bnx2_set_power_state(bp, PCI_D0);
7424
7425         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7426
7427         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7428                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7429                         dev_err(&pdev->dev,
7430                                 "Cannot find PCIE capability, aborting.\n");
7431                         rc = -EIO;
7432                         goto err_out_unmap;
7433                 }
7434                 bp->flags |= BNX2_FLAG_PCIE;
7435                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7436                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7437         } else {
7438                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7439                 if (bp->pcix_cap == 0) {
7440                         dev_err(&pdev->dev,
7441                                 "Cannot find PCIX capability, aborting.\n");
7442                         rc = -EIO;
7443                         goto err_out_unmap;
7444                 }
7445         }
7446
7447         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7448                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7449                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7450         }
7451
7452         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7453                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7454                         bp->flags |= BNX2_FLAG_MSI_CAP;
7455         }
7456
7457         /* 5708 cannot support DMA addresses > 40-bit.  */
7458         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7459                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7460         else
7461                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7462
7463         /* Configure DMA attributes. */
7464         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7465                 dev->features |= NETIF_F_HIGHDMA;
7466                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7467                 if (rc) {
7468                         dev_err(&pdev->dev,
7469                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7470                         goto err_out_unmap;
7471                 }
7472         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7473                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7474                 goto err_out_unmap;
7475         }
7476
7477         if (!(bp->flags & BNX2_FLAG_PCIE))
7478                 bnx2_get_pci_speed(bp);
7479
7480         /* 5706A0 may falsely detect SERR and PERR. */
7481         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7482                 reg = REG_RD(bp, PCI_COMMAND);
7483                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7484                 REG_WR(bp, PCI_COMMAND, reg);
7485         }
7486         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7487                 !(bp->flags & BNX2_FLAG_PCIX)) {
7488
7489                 dev_err(&pdev->dev,
7490                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7491                 goto err_out_unmap;
7492         }
7493
7494         bnx2_init_nvram(bp);
7495
7496         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7497
7498         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7499             BNX2_SHM_HDR_SIGNATURE_SIG) {
7500                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7501
7502                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7503         } else
7504                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7505
7506         /* Get the permanent MAC address.  First we need to make sure the
7507          * firmware is actually running.
7508          */
7509         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7510
7511         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7512             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7513                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7514                 rc = -ENODEV;
7515                 goto err_out_unmap;
7516         }
7517
7518         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7519         for (i = 0, j = 0; i < 3; i++) {
7520                 u8 num, k, skip0;
7521
7522                 num = (u8) (reg >> (24 - (i * 8)));
7523                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7524                         if (num >= k || !skip0 || k == 1) {
7525                                 bp->fw_version[j++] = (num / k) + '0';
7526                                 skip0 = 0;
7527                         }
7528                 }
7529                 if (i != 2)
7530                         bp->fw_version[j++] = '.';
7531         }
7532         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7533         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7534                 bp->wol = 1;
7535
7536         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7537                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7538
7539                 for (i = 0; i < 30; i++) {
7540                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7541                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7542                                 break;
7543                         msleep(10);
7544                 }
7545         }
7546         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7547         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7548         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7549             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7550                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7551
7552                 bp->fw_version[j++] = ' ';
7553                 for (i = 0; i < 3; i++) {
7554                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7555                         reg = swab32(reg);
7556                         memcpy(&bp->fw_version[j], &reg, 4);
7557                         j += 4;
7558                 }
7559         }
7560
7561         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7562         bp->mac_addr[0] = (u8) (reg >> 8);
7563         bp->mac_addr[1] = (u8) reg;
7564
7565         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7566         bp->mac_addr[2] = (u8) (reg >> 24);
7567         bp->mac_addr[3] = (u8) (reg >> 16);
7568         bp->mac_addr[4] = (u8) (reg >> 8);
7569         bp->mac_addr[5] = (u8) reg;
7570
7571         bp->tx_ring_size = MAX_TX_DESC_CNT;
7572         bnx2_set_rx_ring_size(bp, 255);
7573
7574         bp->rx_csum = 1;
7575
7576         bp->tx_quick_cons_trip_int = 20;
7577         bp->tx_quick_cons_trip = 20;
7578         bp->tx_ticks_int = 80;
7579         bp->tx_ticks = 80;
7580
7581         bp->rx_quick_cons_trip_int = 6;
7582         bp->rx_quick_cons_trip = 6;
7583         bp->rx_ticks_int = 18;
7584         bp->rx_ticks = 18;
7585
7586         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7587
7588         bp->current_interval = BNX2_TIMER_INTERVAL;
7589
7590         bp->phy_addr = 1;
7591
7592         /* Disable WOL support if we are running on a SERDES chip. */
7593         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7594                 bnx2_get_5709_media(bp);
7595         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7596                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7597
7598         bp->phy_port = PORT_TP;
7599         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7600                 bp->phy_port = PORT_FIBRE;
7601                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7602                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7603                         bp->flags |= BNX2_FLAG_NO_WOL;
7604                         bp->wol = 0;
7605                 }
7606                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7607                         /* Don't do parallel detect on this board because of
7608                          * some board problems.  The link will not go down
7609                          * if we do parallel detect.
7610                          */
7611                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7612                             pdev->subsystem_device == 0x310c)
7613                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7614                 } else {
7615                         bp->phy_addr = 2;
7616                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7617                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7618                 }
7619         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7620                    CHIP_NUM(bp) == CHIP_NUM_5708)
7621                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7622         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7623                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7624                   CHIP_REV(bp) == CHIP_REV_Bx))
7625                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7626
7627         bnx2_init_fw_cap(bp);
7628
7629         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7630             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7631             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7632             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7633                 bp->flags |= BNX2_FLAG_NO_WOL;
7634                 bp->wol = 0;
7635         }
7636
7637         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7638                 bp->tx_quick_cons_trip_int =
7639                         bp->tx_quick_cons_trip;
7640                 bp->tx_ticks_int = bp->tx_ticks;
7641                 bp->rx_quick_cons_trip_int =
7642                         bp->rx_quick_cons_trip;
7643                 bp->rx_ticks_int = bp->rx_ticks;
7644                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7645                 bp->com_ticks_int = bp->com_ticks;
7646                 bp->cmd_ticks_int = bp->cmd_ticks;
7647         }
7648
7649         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7650          *
7651          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7652          * with byte enables disabled on the unused 32-bit word.  This is legal
7653          * but causes problems on the AMD 8132 which will eventually stop
7654          * responding after a while.
7655          *
7656          * AMD believes this incompatibility is unique to the 5706, and
7657          * prefers to locally disable MSI rather than globally disabling it.
7658          */
7659         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7660                 struct pci_dev *amd_8132 = NULL;
7661
7662                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7663                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7664                                                   amd_8132))) {
7665
7666                         if (amd_8132->revision >= 0x10 &&
7667                             amd_8132->revision <= 0x13) {
7668                                 disable_msi = 1;
7669                                 pci_dev_put(amd_8132);
7670                                 break;
7671                         }
7672                 }
7673         }
7674
7675         bnx2_set_default_link(bp);
7676         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7677
7678         init_timer(&bp->timer);
7679         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7680         bp->timer.data = (unsigned long) bp;
7681         bp->timer.function = bnx2_timer;
7682
7683         return 0;
7684
7685 err_out_unmap:
7686         if (bp->regview) {
7687                 iounmap(bp->regview);
7688                 bp->regview = NULL;
7689         }
7690
7691 err_out_release:
7692         pci_release_regions(pdev);
7693
7694 err_out_disable:
7695         pci_disable_device(pdev);
7696         pci_set_drvdata(pdev, NULL);
7697
7698 err_out:
7699         return rc;
7700 }
7701
7702 static char * __devinit
7703 bnx2_bus_string(struct bnx2 *bp, char *str)
7704 {
7705         char *s = str;
7706
7707         if (bp->flags & BNX2_FLAG_PCIE) {
7708                 s += sprintf(s, "PCI Express");
7709         } else {
7710                 s += sprintf(s, "PCI");
7711                 if (bp->flags & BNX2_FLAG_PCIX)
7712                         s += sprintf(s, "-X");
7713                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7714                         s += sprintf(s, " 32-bit");
7715                 else
7716                         s += sprintf(s, " 64-bit");
7717                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7718         }
7719         return str;
7720 }
7721
7722 static void __devinit
7723 bnx2_init_napi(struct bnx2 *bp)
7724 {
7725         int i;
7726
7727         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7728                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7729                 int (*poll)(struct napi_struct *, int);
7730
7731                 if (i == 0)
7732                         poll = bnx2_poll;
7733                 else
7734                         poll = bnx2_poll_msix;
7735
7736                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7737                 bnapi->bp = bp;
7738         }
7739 }
7740
/* Network stack entry points for bnx2 interfaces; installed on the
 * net_device in bnx2_init_one().
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
7759
7760 static int __devinit
7761 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7762 {
7763         static int version_printed = 0;
7764         struct net_device *dev = NULL;
7765         struct bnx2 *bp;
7766         int rc;
7767         char str[40];
7768
7769         if (version_printed++ == 0)
7770                 printk(KERN_INFO "%s", version);
7771
7772         /* dev zeroed in init_etherdev */
7773         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
7774
7775         if (!dev)
7776                 return -ENOMEM;
7777
7778         rc = bnx2_init_board(pdev, dev);
7779         if (rc < 0) {
7780                 free_netdev(dev);
7781                 return rc;
7782         }
7783
7784         dev->netdev_ops = &bnx2_netdev_ops;
7785         dev->watchdog_timeo = TX_TIMEOUT;
7786         dev->ethtool_ops = &bnx2_ethtool_ops;
7787
7788         bp = netdev_priv(dev);
7789         bnx2_init_napi(bp);
7790
7791         pci_set_drvdata(pdev, dev);
7792
7793         memcpy(dev->dev_addr, bp->mac_addr, 6);
7794         memcpy(dev->perm_addr, bp->mac_addr, 6);
7795
7796         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7797         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7798                 dev->features |= NETIF_F_IPV6_CSUM;
7799
7800 #ifdef BCM_VLAN
7801         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7802 #endif
7803         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7804         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7805                 dev->features |= NETIF_F_TSO6;
7806
7807         if ((rc = register_netdev(dev))) {
7808                 dev_err(&pdev->dev, "Cannot register net device\n");
7809                 if (bp->regview)
7810                         iounmap(bp->regview);
7811                 pci_release_regions(pdev);
7812                 pci_disable_device(pdev);
7813                 pci_set_drvdata(pdev, NULL);
7814                 free_netdev(dev);
7815                 return rc;
7816         }
7817
7818         printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7819                 "IRQ %d, node addr %pM\n",
7820                 dev->name,
7821                 board_info[ent->driver_data].name,
7822                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7823                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7824                 bnx2_bus_string(bp, str),
7825                 dev->base_addr,
7826                 bp->pdev->irq, dev->dev_addr);
7827
7828         return 0;
7829 }
7830
/* PCI remove callback: unregister the netdev, unmap the register BAR,
 * and release all PCI resources claimed during probe.  Teardown order
 * mirrors (in reverse) the setup done in bnx2_init_one().
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Let any queued deferred work finish before the device goes away. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7849
/* PM suspend callback: save PCI config state, quiesce the NIC, free the
 * DMA'ed skbs, and put the chip into the requested low-power state.
 * Statement order is significant: the chip must be shut down before its
 * buffers are freed and power is dropped.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Make sure no deferred reset work runs during/after teardown. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7873
/* PM resume callback: restore PCI config state and, if the interface
 * was running at suspend time, bring the chip back to D0, re-init the
 * NIC, and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7890
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* If traffic was flowing, quiesce and reset the NIC so the
	 * upcoming slot reset starts from a clean state.
	 */
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
7920
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* Restore bus mastering and the config space saved at suspend/probe
	 * time before touching the chip.
	 */
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
7950
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart the data path only if it was running before the error. */
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
7970
/* PCI error-recovery callbacks, invoked by the PCI core AER machinery. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
7976
/* PCI driver descriptor tying the bnx2 device-ID table to the probe,
 * remove, power-management, and error-recovery entry points.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7986
/* Module entry point: register the bnx2 PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7991
/* Module exit point: unregister the PCI driver registered in bnx2_init(). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
7999
8000
8001