Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mfashe...
[pandora-kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #include <linux/config.h>
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <asm/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #ifdef NETIF_F_HW_VLAN_TX
40 #include <linux/if_vlan.h>
41 #define BCM_VLAN 1
42 #endif
43 #ifdef NETIF_F_TSO
44 #include <net/ip.h>
45 #include <net/tcp.h>
46 #include <net/checksum.h>
47 #define BCM_TSO 1
48 #endif
49 #include <linux/workqueue.h>
50 #include <linux/crc32.h>
51 #include <linux/prefetch.h>
52 #include <linux/cache.h>
53 #include <linux/zlib.h>
54
55 #include "bnx2.h"
56 #include "bnx2_fw.h"
57
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "	/* prefix for printk messages */
#define DRV_MODULE_VERSION	"1.4.43"
#define DRV_MODULE_RELDATE	"June 28, 2006"

/* Convert a relative delay into an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
/* Supported board types.  Used as the driver_data index into
 * board_info[] below via the PCI device table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
90
91 /* indexed by board_t, above */
/* Human-readable board names, indexed by board_t, above.  Order must
 * match the board_t enumeration exactly.
 */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
103
/* PCI device ID table.  The HP OEM (subvendor/subdevice) entries must
 * precede the generic PCI_ANY_ID entries so they match first.  The last
 * field is the board_t index carried as driver_data.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
121
/* NVRAM geometry table.  At probe time the device strapping selects one
 * of these entries; each entry supplies the NVRAM controller
 * configuration words and command codes followed by the part's
 * buffered-flag, page geometry, byte-address mask, total size, and name.
 * "Entry NNNN" rows are placeholders for strapping codes with no known
 * part (their strap/cfg/write words still need updates, per the
 * comments below).
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
208
209 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
210
211 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
212 {
213         u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
214
215         if (diff > MAX_TX_DESC_CNT)
216                 diff = (diff & MAX_TX_DESC_CNT) - 1;
217         return (bp->tx_ring_size - diff);
218 }
219
220 static u32
221 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
222 {
223         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
224         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
225 }
226
/* Indirectly write a device register: program the PCI config window
 * address first, then write the value through the window.  The two
 * writes must happen in this order.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
233
234 static void
235 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
236 {
237         offset += cid_addr;
238         REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
239         REG_WR(bp, BNX2_CTX_DATA, val);
240 }
241
/* Read a PHY register over the MDIO interface.
 *
 * Returns 0 with the register contents in *val on success, or -EBUSY
 * (with *val set to 0) if the MDIO transaction never completes.  If the
 * MAC is auto-polling the PHY, polling is suspended for the duration of
 * the manual access and restored afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off auto-polling so it cannot collide with our
		 * manual MDIO transaction.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back to flush the write */

		udelay(40);
	}

	/* Launch the read: PHY address, register number, READ command,
	 * and START_BUSY to kick off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion, up to 50 x 10us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data and strip it down to
			 * the 16-bit data field.
			 */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Transaction still busy after the timeout: report failure. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling, regardless of read outcome. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
298
/* Write a PHY register over the MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the MDIO transaction never
 * completes.  As in bnx2_read_phy(), MAC auto-polling of the PHY is
 * suspended around the manual access and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off auto-polling so it cannot collide with our
		 * manual MDIO transaction.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back to flush the write */

		udelay(40);
	}

	/* Launch the write: PHY address, register number, data value,
	 * WRITE command, and START_BUSY to kick off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion, up to 50 x 10us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling, regardless of write outcome. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
347
/* Mask device interrupts.  The trailing read of the same register
 * flushes the posted write so the mask takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
355
/* Unmask device interrupts.  First acknowledge the last seen status
 * index while still masked, then repeat the ack with the mask bit
 * cleared to re-enable, and finally kick the host coalescing block
 * (COAL_NOW) so any event that arrived while masked is picked up.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
368
/* Mask interrupts and wait for any handler already executing on
 * another CPU to finish.  intr_sem is raised before masking; the
 * matching atomic_dec_and_test() in bnx2_netif_start() re-arms the
 * device only when all stop/start pairs have balanced.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
376
/* Quiesce the interface: mask interrupts synchronously, then stop NAPI
 * polling and the transmit queue if the device is up.  Refreshing
 * trans_start keeps the watchdog from declaring a spurious tx timeout
 * while the queue is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
387
388 static void
389 bnx2_netif_start(struct bnx2 *bp)
390 {
391         if (atomic_dec_and_test(&bp->intr_sem)) {
392                 if (netif_running(bp->dev)) {
393                         netif_wake_queue(bp->dev);
394                         netif_poll_enable(bp->dev);
395                         bnx2_enable_int(bp);
396                 }
397         }
398 }
399
400 static void
401 bnx2_free_mem(struct bnx2 *bp)
402 {
403         int i;
404
405         if (bp->status_blk) {
406                 pci_free_consistent(bp->pdev, bp->status_stats_size,
407                                     bp->status_blk, bp->status_blk_mapping);
408                 bp->status_blk = NULL;
409                 bp->stats_blk = NULL;
410         }
411         if (bp->tx_desc_ring) {
412                 pci_free_consistent(bp->pdev,
413                                     sizeof(struct tx_bd) * TX_DESC_CNT,
414                                     bp->tx_desc_ring, bp->tx_desc_mapping);
415                 bp->tx_desc_ring = NULL;
416         }
417         kfree(bp->tx_buf_ring);
418         bp->tx_buf_ring = NULL;
419         for (i = 0; i < bp->rx_max_ring; i++) {
420                 if (bp->rx_desc_ring[i])
421                         pci_free_consistent(bp->pdev,
422                                             sizeof(struct rx_bd) * RX_DESC_CNT,
423                                             bp->rx_desc_ring[i],
424                                             bp->rx_desc_mapping[i]);
425                 bp->rx_desc_ring[i] = NULL;
426         }
427         vfree(bp->rx_buf_ring);
428         bp->rx_buf_ring = NULL;
429 }
430
/* Allocate all host memory the device needs: the software and DMA
 * transmit rings, the (possibly multi-page) receive rings, and one
 * combined status + statistics DMA block.  Returns 0 on success or
 * -ENOMEM, in which case everything allocated so far is freed.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx software ring can be large (rx_max_ring pages of
	 * sw_bds), so it comes from vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The statistics block lives immediately after the (cache-line
	 * aligned) status block within the same allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	/* bnx2_free_mem() tolerates partially-allocated state. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
489
490 static void
491 bnx2_report_fw_link(struct bnx2 *bp)
492 {
493         u32 fw_link_status = 0;
494
495         if (bp->link_up) {
496                 u32 bmsr;
497
498                 switch (bp->line_speed) {
499                 case SPEED_10:
500                         if (bp->duplex == DUPLEX_HALF)
501                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
502                         else
503                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
504                         break;
505                 case SPEED_100:
506                         if (bp->duplex == DUPLEX_HALF)
507                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
508                         else
509                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
510                         break;
511                 case SPEED_1000:
512                         if (bp->duplex == DUPLEX_HALF)
513                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
514                         else
515                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
516                         break;
517                 case SPEED_2500:
518                         if (bp->duplex == DUPLEX_HALF)
519                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
520                         else
521                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
522                         break;
523                 }
524
525                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
526
527                 if (bp->autoneg) {
528                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
529
530                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
531                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
532
533                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
534                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
535                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
536                         else
537                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
538                 }
539         }
540         else
541                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
542
543         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
544 }
545
/* Log the link state to the console, update the carrier flag, and
 * forward the state to the bootcode via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Describe the negotiated pause configuration, if any. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
580
/* Determine the effective pause (flow control) configuration after a
 * link change and store it in bp->flow_ctrl.
 *
 * If pause autonegotiation is not fully enabled, the requested setting
 * is applied directly (full duplex only).  Otherwise the result is
 * resolved from the local and partner advertisements per the standard
 * pause resolution rules (see Table 28B-3 of 802.3ab-1999).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Autoneg of pause not in effect: honor the forced
		 * setting, but only on full duplex links.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex operation. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* The 5708 SerDes PHY reports the resolved pause state
		 * directly in its 1000X status register.
		 */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Translate the 1000Base-X pause bits into the copper
		 * ADVERTISE_PAUSE_* encoding so one resolution path
		 * below handles both PHY types.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
656
657 static int
658 bnx2_5708s_linkup(struct bnx2 *bp)
659 {
660         u32 val;
661
662         bp->link_up = 1;
663         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
664         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
665                 case BCM5708S_1000X_STAT1_SPEED_10:
666                         bp->line_speed = SPEED_10;
667                         break;
668                 case BCM5708S_1000X_STAT1_SPEED_100:
669                         bp->line_speed = SPEED_100;
670                         break;
671                 case BCM5708S_1000X_STAT1_SPEED_1G:
672                         bp->line_speed = SPEED_1000;
673                         break;
674                 case BCM5708S_1000X_STAT1_SPEED_2G5:
675                         bp->line_speed = SPEED_2500;
676                         break;
677         }
678         if (val & BCM5708S_1000X_STAT1_FD)
679                 bp->duplex = DUPLEX_FULL;
680         else
681                 bp->duplex = DUPLEX_HALF;
682
683         return 0;
684 }
685
686 static int
687 bnx2_5706s_linkup(struct bnx2 *bp)
688 {
689         u32 bmcr, local_adv, remote_adv, common;
690
691         bp->link_up = 1;
692         bp->line_speed = SPEED_1000;
693
694         bnx2_read_phy(bp, MII_BMCR, &bmcr);
695         if (bmcr & BMCR_FULLDPLX) {
696                 bp->duplex = DUPLEX_FULL;
697         }
698         else {
699                 bp->duplex = DUPLEX_HALF;
700         }
701
702         if (!(bmcr & BMCR_ANENABLE)) {
703                 return 0;
704         }
705
706         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
707         bnx2_read_phy(bp, MII_LPA, &remote_adv);
708
709         common = local_adv & remote_adv;
710         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
711
712                 if (common & ADVERTISE_1000XFULL) {
713                         bp->duplex = DUPLEX_FULL;
714                 }
715                 else {
716                         bp->duplex = DUPLEX_HALF;
717                 }
718         }
719
720         return 0;
721 }
722
/* Record link-up parameters for a copper PHY.
 *
 * With autoneg enabled, the highest capability common to both ends
 * wins, checked in priority order: 1000 full/half (from the 1000BASE-T
 * control/status registers), then 100 full/half and 10 full/half (from
 * the 10/100 advertisement registers).  If nothing is common the link
 * is marked down.  With autoneg disabled, speed and duplex are taken
 * directly from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000BASE-T ability bits in MII_STAT1000
		 * sit two positions above the corresponding bits in
		 * MII_CTRL1000, hence the shift before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to the 10/100
			 * advertisement registers.
			 */
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: BMCR holds the forced settings. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
788
/* Program the MAC to match the resolved link parameters: inter-packet
 * gap, port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.
 * Finishes by acknowledging the EMAC link-change interrupt.  Always
 * returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		/* 1000HD needs different tx length/IPG settings. */
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only the 5708 has a distinct 10 Mbps
				 * MII port mode; the 5706 uses plain MII
				 * for both 10 and 100.
				 */
				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
					val |= BNX2_EMAC_MODE_PORT_MII_10;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G;
				/* fall through: 2.5G also sets GMII */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: default the port mode to GMII. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
855
/* Re-evaluate the PHY link state and reprogram the MAC to match.
 *
 * Reads MII_BMSR twice (the link-status bit is latched per the MII
 * spec; the second read reflects the current state).  On 5706 serdes
 * parts the EMAC status register overrides BMSR.  Updates bp->link_up,
 * reports transitions, and calls bnx2_set_mac_link().  Always returns 0.
 * NOTE(review): presumably called under bp->phy_lock -- confirm at
 * call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC loopback the link is forced up; nothing to negotiate. */
	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* Double read: first read clears the latched value. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* 5706 serdes: trust the EMAC link indication over BMSR. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve negotiated speed/duplex per PHY type. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: if serdes autoneg got disabled, re-enable
		 * it so the link can come back up.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log only actual link transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
921
922 static int
923 bnx2_reset_phy(struct bnx2 *bp)
924 {
925         int i;
926         u32 reg;
927
928         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
929
930 #define PHY_RESET_MAX_WAIT 100
931         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
932                 udelay(10);
933
934                 bnx2_read_phy(bp, MII_BMCR, &reg);
935                 if (!(reg & BMCR_RESET)) {
936                         udelay(20);
937                         break;
938                 }
939         }
940         if (i == PHY_RESET_MAX_WAIT) {
941                 return -EBUSY;
942         }
943         return 0;
944 }
945
946 static u32
947 bnx2_phy_get_pause_adv(struct bnx2 *bp)
948 {
949         u32 adv = 0;
950
951         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
952                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
953
954                 if (bp->phy_flags & PHY_SERDES_FLAG) {
955                         adv = ADVERTISE_1000XPAUSE;
956                 }
957                 else {
958                         adv = ADVERTISE_PAUSE_CAP;
959                 }
960         }
961         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
962                 if (bp->phy_flags & PHY_SERDES_FLAG) {
963                         adv = ADVERTISE_1000XPSE_ASYM;
964                 }
965                 else {
966                         adv = ADVERTISE_PAUSE_ASYM;
967                 }
968         }
969         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
970                 if (bp->phy_flags & PHY_SERDES_FLAG) {
971                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
972                 }
973                 else {
974                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
975                 }
976         }
977         return adv;
978 }
979
/* Configure the serdes PHY from bp->autoneg / bp->req_* / bp->advertising.
 *
 * Forced-speed mode: program ADVERTISE and BMCR directly, bouncing the
 * link first if anything changed so the partner re-acquires.
 * Autoneg mode: rebuild the advertisement (1000X full + pause bits) and
 * restart autonegotiation only if the advertisement or BMCR changed.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* 5708: drop 2.5G capability when forcing the speed. */
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiation path: re-enable 2.5G advertisement if capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if something actually changed. */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			/* Hold the PHY in loopback for ~11 ms. */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1078
/* ethtool advertising masks supported per media type. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement register bits covering all 10/100 speeds. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for both 1000 duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1091
/* Configure the copper PHY from bp->autoneg / bp->req_* / bp->advertising.
 *
 * Autoneg mode: rebuild the MII and 1000BASE-T advertisement registers
 * and restart autonegotiation only if something changed.
 * Forced mode: program BMCR directly, first forcing the link down (via
 * PHY loopback) so the partner re-acquires at the new speed.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits we manage from the
		 * current advertisement, for change detection below.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the ethtool advertising mask to MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* Double read: BMSR link status is latched. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Wait up to ~62 ms for the link to drop. */
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1186
1187 static int
1188 bnx2_setup_phy(struct bnx2 *bp)
1189 {
1190         if (bp->loopback == MAC_LOOPBACK)
1191                 return 0;
1192
1193         if (bp->phy_flags & PHY_SERDES_FLAG) {
1194                 return (bnx2_setup_serdes_phy(bp));
1195         }
1196         else {
1197                 return (bnx2_setup_copper_phy(bp));
1198         }
1199 }
1200
/* One-time setup for the 5708 serdes PHY.
 *
 * Selects IEEE-compliant behavior in the DIG3 block, enables fiber
 * mode with auto-detection and parallel-detect, optionally enables
 * 2.5G, raises TX amplitude on early chip revisions, and applies the
 * NVRAM-configured TX control value on backplane designs.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* The BLK_ADDR register selects which register block the
	 * subsequent PHY accesses address.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G if the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* TX control override from shared NVRAM config (shmem). */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1254
/* One-time setup for the 5706 serdes PHY.
 *
 * The raw 0x18/0x1c PHY writes access Broadcom auxiliary/expansion
 * registers; the values differ for jumbo (>1500 MTU) vs standard
 * frames.  Register semantics come from the Broadcom PHY datasheet --
 * treated as opaque constants here.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		/* NOTE(review): purpose of this MISC_UNUSED0 write is
		 * not documented here -- chip workaround, presumably.
		 */
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1290
/* One-time setup for the copper PHY: CRC workaround write sequence,
 * extended packet length for jumbo MTU, and ethernet@wirespeed.
 * The raw 0x10/0x15/0x17/0x18 writes access Broadcom auxiliary/shadow
 * registers (values per datasheet, treated as opaque here).
 * Always returns 0.
 *
 * NOTE(review): PHY_CRC_FIX_FLAG is set unconditionally just before it
 * is tested, so the workaround block always runs -- confirm whether
 * the flag was meant to be conditional on chip revision.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* CRC workaround register sequence. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1333
1334
1335 static int
1336 bnx2_init_phy(struct bnx2 *bp)
1337 {
1338         u32 val;
1339         int rc = 0;
1340
1341         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1342         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1343
1344         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1345
1346         bnx2_reset_phy(bp);
1347
1348         bnx2_read_phy(bp, MII_PHYSID1, &val);
1349         bp->phy_id = val << 16;
1350         bnx2_read_phy(bp, MII_PHYSID2, &val);
1351         bp->phy_id |= val & 0xffff;
1352
1353         if (bp->phy_flags & PHY_SERDES_FLAG) {
1354                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1355                         rc = bnx2_init_5706s_phy(bp);
1356                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1357                         rc = bnx2_init_5708s_phy(bp);
1358         }
1359         else {
1360                 rc = bnx2_init_copper_phy(bp);
1361         }
1362
1363         bnx2_setup_phy(bp);
1364
1365         return rc;
1366 }
1367
1368 static int
1369 bnx2_set_mac_loopback(struct bnx2 *bp)
1370 {
1371         u32 mac_mode;
1372
1373         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1374         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1375         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1376         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1377         bp->link_up = 1;
1378         return 0;
1379 }
1380
/* Forward declaration: bnx2_test_link() is defined later in this file. */
static int bnx2_test_link(struct bnx2 *);

1383 static int
1384 bnx2_set_phy_loopback(struct bnx2 *bp)
1385 {
1386         u32 mac_mode;
1387         int rc, i;
1388
1389         spin_lock_bh(&bp->phy_lock);
1390         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1391                             BMCR_SPEED1000);
1392         spin_unlock_bh(&bp->phy_lock);
1393         if (rc)
1394                 return rc;
1395
1396         for (i = 0; i < 10; i++) {
1397                 if (bnx2_test_link(bp) == 0)
1398                         break;
1399                 udelay(10);
1400         }
1401
1402         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1403         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1404                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1405                       BNX2_EMAC_MODE_25G);
1406
1407         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1408         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1409         bp->link_up = 1;
1410         return 0;
1411 }
1412
/* Send a message to the bootcode firmware through the shared-memory
 * mailbox and wait for its acknowledgement.
 *
 * A rolling sequence number (bp->fw_wr_seq) is folded into msg_data so
 * the ACK can be matched.  Polls up to FW_ACK_TIME_OUT_MS.  Messages
 * whose data field is BNX2_DRV_MSG_DATA_WAIT0 succeed regardless of
 * the ACK.  On timeout, a FW_TIMEOUT code is written back to the
 * mailbox and -EBUSY returned (silently if 'silent' is set); a non-OK
 * firmware status returns -EIO.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require an ACK. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1455
1456 static void
1457 bnx2_init_context(struct bnx2 *bp)
1458 {
1459         u32 vcid;
1460
1461         vcid = 96;
1462         while (vcid) {
1463                 u32 vcid_addr, pcid_addr, offset;
1464
1465                 vcid--;
1466
1467                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1468                         u32 new_vcid;
1469
1470                         vcid_addr = GET_PCID_ADDR(vcid);
1471                         if (vcid & 0x8) {
1472                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1473                         }
1474                         else {
1475                                 new_vcid = vcid;
1476                         }
1477                         pcid_addr = GET_PCID_ADDR(new_vcid);
1478                 }
1479                 else {
1480                         vcid_addr = GET_CID_ADDR(vcid);
1481                         pcid_addr = vcid_addr;
1482                 }
1483
1484                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1485                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1486
1487                 /* Zero out the context. */
1488                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1489                         CTX_WR(bp, 0x00, offset, 0);
1490                 }
1491
1492                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1493                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1494         }
1495 }
1496
/* Quarantine bad RX mbuf memory blocks (chip workaround, presumably
 * for an early-silicon erratum -- not stated here).
 *
 * Drains the free RX buffer pool by allocating every mbuf, recording
 * the good ones (bit 9 of the returned value clear), then frees only
 * the good ones back.  The bad blocks are never returned and so are
 * permanently removed from circulation.  Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free command: value duplicated at bit 9
		 * with bit 0 set -- TODO confirm field layout against
		 * the RBUF register documentation.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1547
1548 static void
1549 bnx2_set_mac_addr(struct bnx2 *bp) 
1550 {
1551         u32 val;
1552         u8 *mac_addr = bp->dev->dev_addr;
1553
1554         val = (mac_addr[0] << 8) | mac_addr[1];
1555
1556         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1557
1558         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1559                 (mac_addr[4] << 8) | mac_addr[5];
1560
1561         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1562 }
1563
/* Allocate and DMA-map a new skb for RX ring slot 'index'.
 *
 * Aligns skb->data to an 8-byte boundary, maps it for device-to-host
 * DMA, stores the mapping in the sw_bd, and writes the 64-bit bus
 * address into the hardware rx_bd.  Also advances bp->rx_prod_bseq by
 * the buffer size.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Round skb->data up to the next 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the bus address across the two 32-bit BD fields. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1596
1597 static void
1598 bnx2_phy_int(struct bnx2 *bp)
1599 {
1600         u32 new_link_state, old_link_state;
1601
1602         new_link_state = bp->status_blk->status_attn_bits &
1603                 STATUS_ATTN_BITS_LINK_STATE;
1604         old_link_state = bp->status_blk->status_attn_bits_ack &
1605                 STATUS_ATTN_BITS_LINK_STATE;
1606         if (new_link_state != old_link_state) {
1607                 if (new_link_state) {
1608                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1609                                 STATUS_ATTN_BITS_LINK_STATE);
1610                 }
1611                 else {
1612                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1613                                 STATUS_ATTN_BITS_LINK_STATE);
1614                 }
1615                 bnx2_set_link(bp);
1616         }
1617 }
1618
/* Reclaim completed TX descriptors.
 *
 * Walks from the driver's consumer index (bp->tx_cons) to the
 * hardware's (from the status block), unmapping and freeing each
 * completed skb.  The (hw_cons & MAX_TX_DESC_CNT) adjustment skips
 * the last entry of each ring page, which holds the next-page chain
 * pointer rather than a real BD.  Wakes the TX queue if it was
 * stopped and enough BDs are free again.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO 
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->gso_size) {
			u16 last_idx, last_ring_idx;

			/* Index of this packet's last BD (head + frags). */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the chain entry if the packet's
			 * BDs cross a ring-page boundary.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD has not completed
			 * yet (signed compare handles index wraparound).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		/* Unmap the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each paged fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-sample the hardware consumer; more BDs may have
		 * completed while we were freeing.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Re-check under tx_lock to avoid racing with the xmit path. */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
1701
/* Recycle an RX buffer without reallocating: move the skb and its DMA
 * mapping from the consumer slot to the producer slot.  Used when the
 * received packet was copied out (or dropped) and the original buffer
 * can be handed straight back to the hardware.  Only the first
 * rx_offset + RX_COPY_THRESH bytes are synced back to the device.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the mapping and BD address are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the bus address into the producer's hardware BD. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1731
/* Service completed receive descriptors, passing at most @budget packets
 * to the network stack.  Returns the number of packets processed.
 * Called from bnx2_poll() in NAPI context.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	/* Snapshot the hardware consumer index.  Indices landing on the
	 * last slot of a ring page are stepped over (NOTE(review):
	 * presumably that slot holds the next-page chain pointer --
	 * confirm against the ring setup code).
	 */
	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area; it contains the l2_fhdr the
		 * chip wrote plus enough data for the copy-break path.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to the frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Frame length less the trailing 4-byte FCS. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		/* Recycle frames the chip flagged with receive errors. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			/* Original buffer goes back to the ring unchanged. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer was posted; this skb can be
			 * unmapped in full and handed to the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement available: drop the frame and
			 * repost the same buffer.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames, except VLAN-tagged ones (0x8100)
		 * which may legitimately exceed the MTU by the tag size.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Accept the hardware checksum only when rx_csum is on,
		 * the frame is TCP/UDP, and no checksum error was flagged.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		/* Hardware-accelerated VLAN: pass the stripped tag along. */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1882
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache; the poll handler reads the status block. */
	prefetch(bp->status_blk);
	/* Mask further chip interrupts until bnx2_poll() re-arms them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to NAPI polling (bnx2_poll()). */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1905
/* INTx ISR.  Unlike the MSI ISR, the interrupt line may be shared, so
 * first check whether this device actually raised it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further chip interrupts until bnx2_poll() re-arms them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to NAPI polling (bnx2_poll()). */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1935
1936 static inline int
1937 bnx2_has_work(struct bnx2 *bp)
1938 {
1939         struct status_block *sblk = bp->status_blk;
1940
1941         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1942             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1943                 return 1;
1944
1945         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1946             bp->link_up)
1947                 return 1;
1948
1949         return 0;
1950 }
1951
/* NAPI poll handler: service link attention, TX completions, and up to
 * the allowed budget of RX packets, then re-enable interrupts once no
 * work remains.  Returns 1 while work is still pending, 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Attention bit differing from its ack copy means the link
	 * state changed and must be serviced.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed this device's remaining quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we acted on before re-checking for
	 * work; the barrier keeps the reads ordered.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* Re-arm interrupts.  MSI needs only one unmasking write. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* NOTE(review): INTx re-arms in two steps -- first an ack
		 * with MASK_INT still set, then an unmasked ack --
		 * presumably to avoid a spurious shared-interrupt window;
		 * confirm against chip documentation.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2013
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC receive mode (promiscuous / VLAN tag stripping) and
 * the multicast hash filter to match dev->flags and dev->mc_list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and keep-VLAN-tag
	 * cleared; both are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep (do not strip) VLAN tags when no vlan group is registered
	 * and ASF is inactive.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash on the low byte of the little-endian CRC:
			 * bits 7:5 pick the register, bits 4:0 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Apply the sort-user0 rule: disable, program, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2087
2088 #define FW_BUF_SIZE     0x8000
2089
2090 static int
2091 bnx2_gunzip_init(struct bnx2 *bp)
2092 {
2093         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2094                 goto gunzip_nomem1;
2095
2096         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2097                 goto gunzip_nomem2;
2098
2099         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2100         if (bp->strm->workspace == NULL)
2101                 goto gunzip_nomem3;
2102
2103         return 0;
2104
2105 gunzip_nomem3:
2106         kfree(bp->strm);
2107         bp->strm = NULL;
2108
2109 gunzip_nomem2:
2110         vfree(bp->gunzip_buf);
2111         bp->gunzip_buf = NULL;
2112
2113 gunzip_nomem1:
2114         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2115                             "uncompression.\n", bp->dev->name);
2116         return -ENOMEM;
2117 }
2118
2119 static void
2120 bnx2_gunzip_end(struct bnx2 *bp)
2121 {
2122         kfree(bp->strm->workspace);
2123
2124         kfree(bp->strm);
2125         bp->strm = NULL;
2126
2127         if (bp->gunzip_buf) {
2128                 vfree(bp->gunzip_buf);
2129                 bp->gunzip_buf = NULL;
2130         }
2131 }
2132
2133 static int
2134 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2135 {
2136         int n, rc;
2137
2138         /* check gzip header */
2139         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2140                 return -EINVAL;
2141
2142         n = 10;
2143
2144 #define FNAME   0x8
2145         if (zbuf[3] & FNAME)
2146                 while ((zbuf[n++] != 0) && (n < len));
2147
2148         bp->strm->next_in = zbuf + n;
2149         bp->strm->avail_in = len - n;
2150         bp->strm->next_out = bp->gunzip_buf;
2151         bp->strm->avail_out = FW_BUF_SIZE;
2152
2153         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2154         if (rc != Z_OK)
2155                 return rc;
2156
2157         rc = zlib_inflate(bp->strm, Z_FINISH);
2158
2159         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2160         *outbuf = bp->gunzip_buf;
2161
2162         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2163                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2164                        bp->dev->name, bp->strm->msg);
2165
2166         zlib_inflateEnd(bp->strm);
2167
2168         if (rc == Z_STREAM_END)
2169                 return 0;
2170
2171         return rc;
2172 }
2173
/* Load one RV2P processor's firmware image, two 32-bit instruction
 * words at a time, then reset that processor (it is un-stalled later).
 * @rv2p_code_len is in bytes; each loop iteration consumes 8 bytes.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the staged instruction pair to slot i/8 of the
		 * selected processor.
		 */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2206
2207 static void
2208 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2209 {
2210         u32 offset;
2211         u32 val;
2212
2213         /* Halt the CPU. */
2214         val = REG_RD_IND(bp, cpu_reg->mode);
2215         val |= cpu_reg->mode_value_halt;
2216         REG_WR_IND(bp, cpu_reg->mode, val);
2217         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2218
2219         /* Load the Text area. */
2220         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2221         if (fw->text) {
2222                 int j;
2223
2224                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2225                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2226                 }
2227         }
2228
2229         /* Load the Data area. */
2230         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2231         if (fw->data) {
2232                 int j;
2233
2234                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2235                         REG_WR_IND(bp, offset, fw->data[j]);
2236                 }
2237         }
2238
2239         /* Load the SBSS area. */
2240         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2241         if (fw->sbss) {
2242                 int j;
2243
2244                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2245                         REG_WR_IND(bp, offset, fw->sbss[j]);
2246                 }
2247         }
2248
2249         /* Load the BSS area. */
2250         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2251         if (fw->bss) {
2252                 int j;
2253
2254                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2255                         REG_WR_IND(bp, offset, fw->bss[j]);
2256                 }
2257         }
2258
2259         /* Load the Read-Only area. */
2260         offset = cpu_reg->spad_base +
2261                 (fw->rodata_addr - cpu_reg->mips_view_base);
2262         if (fw->rodata) {
2263                 int j;
2264
2265                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2266                         REG_WR_IND(bp, offset, fw->rodata[j]);
2267                 }
2268         }
2269
2270         /* Clear the pre-fetch instruction. */
2271         REG_WR_IND(bp, cpu_reg->inst, 0);
2272         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2273
2274         /* Start the CPU. */
2275         val = REG_RD_IND(bp, cpu_reg->mode);
2276         val &= ~cpu_reg->mode_value_halt;
2277         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2278         REG_WR_IND(bp, cpu_reg->mode, val);
2279 }
2280
/* Decompress and load firmware into every on-chip processor: the two
 * RV2P engines and the RX, TX, TX patch-up, and completion CPUs.
 * The single gunzip output buffer is reused, so each text image is
 * loaded before the next one is decompressed.  Returns 0 on success or
 * the first decompression error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;

	/* Only the text section is compressed; decompress it into the
	 * shared gunzip buffer before handing it to load_cpu_fw().
	 */
	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Success and failure paths both release the gunzip state. */
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2520
/* Transition the controller between PCI power states.
 *
 * Only PCI_D0 (full power) and PCI_D3hot (suspend, optionally with
 * Wake-on-LAN armed) are supported; any other state returns -EINVAL.
 *
 * Returns 0 on success, -EINVAL for an unsupported state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (back to D0) and ack any
		 * pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Set the RCVD status bits (presumably write-1-to-clear
		 * latched wakeup events -- confirm against chip docs) and
		 * turn magic-packet detection off now that we are awake. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI wakeup matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Bring the link up at 10/100 for WOL; save and
			 * restore the user's autoneg settings around it. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort user 0 to accept broadcast and
			 * multicast, clearing it first, then enabling. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware whether WOL is armed for this suspend. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 are only placed in D3hot (PowerState = 3)
		 * when WOL is enabled; later revisions always enter D3hot. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2647
2648 static int
2649 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2650 {
2651         u32 val;
2652         int j;
2653
2654         /* Request access to the flash interface. */
2655         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2656         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2657                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2658                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2659                         break;
2660
2661                 udelay(5);
2662         }
2663
2664         if (j >= NVRAM_TIMEOUT_COUNT)
2665                 return -EBUSY;
2666
2667         return 0;
2668 }
2669
2670 static int
2671 bnx2_release_nvram_lock(struct bnx2 *bp)
2672 {
2673         int j;
2674         u32 val;
2675
2676         /* Relinquish nvram interface. */
2677         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2678
2679         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2680                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2681                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2682                         break;
2683
2684                 udelay(5);
2685         }
2686
2687         if (j >= NVRAM_TIMEOUT_COUNT)
2688                 return -EBUSY;
2689
2690         return 0;
2691 }
2692
2693
2694 static int
2695 bnx2_enable_nvram_write(struct bnx2 *bp)
2696 {
2697         u32 val;
2698
2699         val = REG_RD(bp, BNX2_MISC_CFG);
2700         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2701
2702         if (!bp->flash_info->buffered) {
2703                 int j;
2704
2705                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2706                 REG_WR(bp, BNX2_NVM_COMMAND,
2707                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2708
2709                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2710                         udelay(5);
2711
2712                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2713                         if (val & BNX2_NVM_COMMAND_DONE)
2714                                 break;
2715                 }
2716
2717                 if (j >= NVRAM_TIMEOUT_COUNT)
2718                         return -EBUSY;
2719         }
2720         return 0;
2721 }
2722
2723 static void
2724 bnx2_disable_nvram_write(struct bnx2 *bp)
2725 {
2726         u32 val;
2727
2728         val = REG_RD(bp, BNX2_MISC_CFG);
2729         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2730 }
2731
2732
2733 static void
2734 bnx2_enable_nvram_access(struct bnx2 *bp)
2735 {
2736         u32 val;
2737
2738         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2739         /* Enable both bits, even on read. */
2740         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2741                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2742 }
2743
2744 static void
2745 bnx2_disable_nvram_access(struct bnx2 *bp)
2746 {
2747         u32 val;
2748
2749         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2750         /* Disable both bits, even after read. */
2751         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2752                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2753                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2754 }
2755
2756 static int
2757 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2758 {
2759         u32 cmd;
2760         int j;
2761
2762         if (bp->flash_info->buffered)
2763                 /* Buffered flash, no erase needed */
2764                 return 0;
2765
2766         /* Build an erase command */
2767         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2768               BNX2_NVM_COMMAND_DOIT;
2769
2770         /* Need to clear DONE bit separately. */
2771         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2772
2773         /* Address of the NVRAM to read from. */
2774         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2775
2776         /* Issue an erase command. */
2777         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2778
2779         /* Wait for completion. */
2780         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2781                 u32 val;
2782
2783                 udelay(5);
2784
2785                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2786                 if (val & BNX2_NVM_COMMAND_DONE)
2787                         break;
2788         }
2789
2790         if (j >= NVRAM_TIMEOUT_COUNT)
2791                 return -EBUSY;
2792
2793         return 0;
2794 }
2795
2796 static int
2797 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2798 {
2799         u32 cmd;
2800         int j;
2801
2802         /* Build the command word. */
2803         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2804
2805         /* Calculate an offset of a buffered flash. */
2806         if (bp->flash_info->buffered) {
2807                 offset = ((offset / bp->flash_info->page_size) <<
2808                            bp->flash_info->page_bits) +
2809                           (offset % bp->flash_info->page_size);
2810         }
2811
2812         /* Need to clear DONE bit separately. */
2813         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2814
2815         /* Address of the NVRAM to read from. */
2816         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2817
2818         /* Issue a read command. */
2819         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2820
2821         /* Wait for completion. */
2822         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2823                 u32 val;
2824
2825                 udelay(5);
2826
2827                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2828                 if (val & BNX2_NVM_COMMAND_DONE) {
2829                         val = REG_RD(bp, BNX2_NVM_READ);
2830
2831                         val = be32_to_cpu(val);
2832                         memcpy(ret_val, &val, 4);
2833                         break;
2834                 }
2835         }
2836         if (j >= NVRAM_TIMEOUT_COUNT)
2837                 return -EBUSY;
2838
2839         return 0;
2840 }
2841
2842
2843 static int
2844 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2845 {
2846         u32 cmd, val32;
2847         int j;
2848
2849         /* Build the command word. */
2850         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2851
2852         /* Calculate an offset of a buffered flash. */
2853         if (bp->flash_info->buffered) {
2854                 offset = ((offset / bp->flash_info->page_size) <<
2855                           bp->flash_info->page_bits) +
2856                          (offset % bp->flash_info->page_size);
2857         }
2858
2859         /* Need to clear DONE bit separately. */
2860         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2861
2862         memcpy(&val32, val, 4);
2863         val32 = cpu_to_be32(val32);
2864
2865         /* Write the data. */
2866         REG_WR(bp, BNX2_NVM_WRITE, val32);
2867
2868         /* Address of the NVRAM to write to. */
2869         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2870
2871         /* Issue the write command. */
2872         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2873
2874         /* Wait for completion. */
2875         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2876                 udelay(5);
2877
2878                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2879                         break;
2880         }
2881         if (j >= NVRAM_TIMEOUT_COUNT)
2882                 return -EBUSY;
2883
2884         return 0;
2885 }
2886
/* Identify the attached flash/EEPROM device and record its descriptor
 * in bp->flash_info, reprogramming the NVRAM interface from the flash
 * table if the hardware has not already reconfigured it.
 *
 * Also determines the usable flash size: the value advertised in
 * shared memory takes precedence over the table's total_size.
 *
 * Returns 0 on success, -ENODEV if no flash_table entry matches, or
 * the error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping encoding to match on. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither branch broke out of its scan: no table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size published in shared memory, if non-zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2964
2965 static int
2966 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2967                 int buf_size)
2968 {
2969         int rc = 0;
2970         u32 cmd_flags, offset32, len32, extra;
2971
2972         if (buf_size == 0)
2973                 return 0;
2974
2975         /* Request access to the flash interface. */
2976         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2977                 return rc;
2978
2979         /* Enable access to flash interface */
2980         bnx2_enable_nvram_access(bp);
2981
2982         len32 = buf_size;
2983         offset32 = offset;
2984         extra = 0;
2985
2986         cmd_flags = 0;
2987
2988         if (offset32 & 3) {
2989                 u8 buf[4];
2990                 u32 pre_len;
2991
2992                 offset32 &= ~3;
2993                 pre_len = 4 - (offset & 3);
2994
2995                 if (pre_len >= len32) {
2996                         pre_len = len32;
2997                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2998                                     BNX2_NVM_COMMAND_LAST;
2999                 }
3000                 else {
3001                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3002                 }
3003
3004                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3005
3006                 if (rc)
3007                         return rc;
3008
3009                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3010
3011                 offset32 += 4;
3012                 ret_buf += pre_len;
3013                 len32 -= pre_len;
3014         }
3015         if (len32 & 3) {
3016                 extra = 4 - (len32 & 3);
3017                 len32 = (len32 + 4) & ~3;
3018         }
3019
3020         if (len32 == 4) {
3021                 u8 buf[4];
3022
3023                 if (cmd_flags)
3024                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3025                 else
3026                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3027                                     BNX2_NVM_COMMAND_LAST;
3028
3029                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3030
3031                 memcpy(ret_buf, buf, 4 - extra);
3032         }
3033         else if (len32 > 0) {
3034                 u8 buf[4];
3035
3036                 /* Read the first word. */
3037                 if (cmd_flags)
3038                         cmd_flags = 0;
3039                 else
3040                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3041
3042                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3043
3044                 /* Advance to the next dword. */
3045                 offset32 += 4;
3046                 ret_buf += 4;
3047                 len32 -= 4;
3048
3049                 while (len32 > 4 && rc == 0) {
3050                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3051
3052                         /* Advance to the next dword. */
3053                         offset32 += 4;
3054                         ret_buf += 4;
3055                         len32 -= 4;
3056                 }
3057
3058                 if (rc)
3059                         return rc;
3060
3061                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3062                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3063
3064                 memcpy(ret_buf, buf, 4 - extra);
3065         }
3066
3067         /* Disable access to flash interface */
3068         bnx2_disable_nvram_access(bp);
3069
3070         bnx2_release_nvram_lock(bp);
3071
3072         return rc;
3073 }
3074
/* Write buf_size bytes from data_buf to NVRAM at byte offset 'offset'.
 *
 * Unaligned head/tail bytes are handled by read-modify-write: the
 * surrounding dwords are read first ('start'/'end') and merged with the
 * caller's data into a temporary aligned buffer.  Non-buffered flash is
 * updated one page at a time: each affected page is read into
 * 'flash_buffer', erased, and rewritten with the modified contents.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths inside the per-page loop jump to
 * nvram_write_end while the NVRAM lock acquired at the top of that
 * iteration is still held and NVRAM access is still enabled -- the
 * lock is never released on those paths.  Worth confirming and fixing
 * separately.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range down to a dword boundary and
	 * fetch the existing leading bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen the range up to a dword boundary and
	 * fetch the existing trailing bytes. */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
				end, 4))) {
				return rc;
			}
		}
	}

	/* Merge the preserved head/tail bytes with the caller's data
	 * into an aligned scratch buffer. */
	if (align_start || align_end) {
		buf = kmalloc(len32, GFP_KERNEL);
		if (buf == 0)
			return -ENOMEM;
		if (align_start) {
			memcpy(buf, start, 4);
		}
		if (align_end) {
			memcpy(buf + len32 - 4, end, 4);
		}
		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Non-buffered flash needs a page-sized bounce buffer for the
	 * read-erase-rewrite cycle (264 bytes; presumably the largest
	 * supported page size -- confirm against flash_table). */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ? 
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j, 
					&flash_buffer[j], 
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {
				
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST on the final dword of the page, or, for
			 * buffered flash, the final dword of the data. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {
			
				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	if (bp->flash_info->buffered == 0)
		kfree(flash_buffer);

	if (align_start || align_end)
		kfree(buf);
	return rc;
}
3258
/* Soft-reset the chip core and wait for it and the bootcode to come
 * back.  reset_code is the BNX2_DRV_MSG_CODE_* reason passed to the
 * firmware before and after the reset.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if byte swapping is misconfigured afterwards, or the error from the
 * post-reset firmware sync.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1 get extra settling time before polling. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* REQ or BSY still set after the poll loop: reset never finished. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3337
3338 static int
3339 bnx2_init_chip(struct bnx2 *bp)
3340 {
3341         u32 val;
3342         int rc;
3343
3344         /* Make sure the interrupt is not active. */
3345         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3346
3347         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3348               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3349 #ifdef __BIG_ENDIAN
3350               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
3351 #endif
3352               BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
3353               DMA_READ_CHANS << 12 |
3354               DMA_WRITE_CHANS << 16;
3355
3356         val |= (0x2 << 20) | (1 << 11);
3357
3358         if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3359                 val |= (1 << 23);
3360
3361         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3362             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3363                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3364
3365         REG_WR(bp, BNX2_DMA_CONFIG, val);
3366
3367         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3368                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3369                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3370                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3371         }
3372
3373         if (bp->flags & PCIX_FLAG) {
3374                 u16 val16;
3375
3376                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3377                                      &val16);
3378                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3379                                       val16 & ~PCI_X_CMD_ERO);
3380         }
3381
3382         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3383                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3384                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3385                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3386
3387         /* Initialize context mapping and zero out the quick contexts.  The
3388          * context block must have already been enabled. */
3389         bnx2_init_context(bp);
3390
3391         if ((rc = bnx2_init_cpus(bp)) != 0)
3392                 return rc;
3393
3394         bnx2_init_nvram(bp);
3395
3396         bnx2_set_mac_addr(bp);
3397
3398         val = REG_RD(bp, BNX2_MQ_CONFIG);
3399         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3400         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3401         REG_WR(bp, BNX2_MQ_CONFIG, val);
3402
3403         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3404         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3405         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3406
3407         val = (BCM_PAGE_BITS - 8) << 24;
3408         REG_WR(bp, BNX2_RV2P_CONFIG, val);
3409
3410         /* Configure page size. */
3411         val = REG_RD(bp, BNX2_TBDR_CONFIG);
3412         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3413         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3414         REG_WR(bp, BNX2_TBDR_CONFIG, val);
3415
3416         val = bp->mac_addr[0] +
3417               (bp->mac_addr[1] << 8) +
3418               (bp->mac_addr[2] << 16) +
3419               bp->mac_addr[3] +
3420               (bp->mac_addr[4] << 8) +
3421               (bp->mac_addr[5] << 16);
3422         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3423
3424         /* Program the MTU.  Also include 4 bytes for CRC32. */
3425         val = bp->dev->mtu + ETH_HLEN + 4;
3426         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3427                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3428         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3429
3430         bp->last_status_idx = 0;
3431         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3432
3433         /* Set up how to generate a link change interrupt. */
3434         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3435
3436         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3437                (u64) bp->status_blk_mapping & 0xffffffff);
3438         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3439
3440         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3441                (u64) bp->stats_blk_mapping & 0xffffffff);
3442         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3443                (u64) bp->stats_blk_mapping >> 32);
3444
3445         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
3446                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3447
3448         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3449                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3450
3451         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3452                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3453
3454         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3455
3456         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3457
3458         REG_WR(bp, BNX2_HC_COM_TICKS,
3459                (bp->com_ticks_int << 16) | bp->com_ticks);
3460
3461         REG_WR(bp, BNX2_HC_CMD_TICKS,
3462                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3463
3464         REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3465         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3466
3467         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3468                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3469         else {
3470                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3471                        BNX2_HC_CONFIG_TX_TMR_MODE |
3472                        BNX2_HC_CONFIG_COLLECT_STATS);
3473         }
3474
3475         /* Clear internal stats counters. */
3476         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3477
3478         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3479
3480         if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3481             BNX2_PORT_FEATURE_ASF_ENABLED)
3482                 bp->flags |= ASF_ENABLE_FLAG;
3483
3484         /* Initialize the receive filter. */
3485         bnx2_set_rx_mode(bp->dev);
3486
3487         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3488                           0);
3489
3490         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3491         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3492
3493         udelay(20);
3494
3495         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3496
3497         return rc;
3498 }
3499
3500
3501 static void
3502 bnx2_init_tx_ring(struct bnx2 *bp)
3503 {
3504         struct tx_bd *txbd;
3505         u32 val;
3506
3507         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3508                 
3509         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3510         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3511
3512         bp->tx_prod = 0;
3513         bp->tx_cons = 0;
3514         bp->hw_tx_cons = 0;
3515         bp->tx_prod_bseq = 0;
3516         
3517         val = BNX2_L2CTX_TYPE_TYPE_L2;
3518         val |= BNX2_L2CTX_TYPE_SIZE_L2;
3519         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3520
3521         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3522         val |= 8 << 16;
3523         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3524
3525         val = (u64) bp->tx_desc_mapping >> 32;
3526         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3527
3528         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3529         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3530 }
3531
/* Set up the RX BD ring(s): size the receive buffers for the current
 * MTU, initialize every data BD in each ring page, chain the pages
 * into a circular list, program the RX context in the chip, and
 * pre-fill the ring with receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		/* Initialize the data BDs of this ring page. */
		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page points at the next page; the
		 * last page points back at page 0, closing the ring.
		 */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context: BD-chain type/size and the DMA address
	 * of the first ring page.
	 */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the ring with receive buffers; stop early (leaving the
	 * ring partially filled) if skb allocation fails.
	 */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Tell the chip how many buffers are available. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3591
3592 static void
3593 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3594 {
3595         u32 num_rings, max;
3596
3597         bp->rx_ring_size = size;
3598         num_rings = 1;
3599         while (size > MAX_RX_DESC_CNT) {
3600                 size -= MAX_RX_DESC_CNT;
3601                 num_rings++;
3602         }
3603         /* round to next power of 2 */
3604         max = MAX_RX_RINGS;
3605         while ((max & num_rings) == 0)
3606                 max >>= 1;
3607
3608         if (num_rings != max)
3609                 max <<= 1;
3610
3611         bp->rx_max_ring = max;
3612         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3613 }
3614
3615 static void
3616 bnx2_free_tx_skbs(struct bnx2 *bp)
3617 {
3618         int i;
3619
3620         if (bp->tx_buf_ring == NULL)
3621                 return;
3622
3623         for (i = 0; i < TX_DESC_CNT; ) {
3624                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3625                 struct sk_buff *skb = tx_buf->skb;
3626                 int j, last;
3627
3628                 if (skb == NULL) {
3629                         i++;
3630                         continue;
3631                 }
3632
3633                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3634                         skb_headlen(skb), PCI_DMA_TODEVICE);
3635
3636                 tx_buf->skb = NULL;
3637
3638                 last = skb_shinfo(skb)->nr_frags;
3639                 for (j = 0; j < last; j++) {
3640                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3641                         pci_unmap_page(bp->pdev,
3642                                 pci_unmap_addr(tx_buf, mapping),
3643                                 skb_shinfo(skb)->frags[j].size,
3644                                 PCI_DMA_TODEVICE);
3645                 }
3646                 dev_kfree_skb(skb);
3647                 i += j + 1;
3648         }
3649
3650 }
3651
3652 static void
3653 bnx2_free_rx_skbs(struct bnx2 *bp)
3654 {
3655         int i;
3656
3657         if (bp->rx_buf_ring == NULL)
3658                 return;
3659
3660         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3661                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3662                 struct sk_buff *skb = rx_buf->skb;
3663
3664                 if (skb == NULL)
3665                         continue;
3666
3667                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3668                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3669
3670                 rx_buf->skb = NULL;
3671
3672                 dev_kfree_skb(skb);
3673         }
3674 }
3675
/* Release every skb still held by either ring. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3682
3683 static int
3684 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3685 {
3686         int rc;
3687
3688         rc = bnx2_reset_chip(bp, reset_code);
3689         bnx2_free_skbs(bp);
3690         if (rc)
3691                 return rc;
3692
3693         if ((rc = bnx2_init_chip(bp)) != 0)
3694                 return rc;
3695
3696         bnx2_init_tx_ring(bp);
3697         bnx2_init_rx_ring(bp);
3698         return 0;
3699 }
3700
3701 static int
3702 bnx2_init_nic(struct bnx2 *bp)
3703 {
3704         int rc;
3705
3706         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3707                 return rc;
3708
3709         bnx2_init_phy(bp);
3710         bnx2_set_link(bp);
3711         return 0;
3712 }
3713
/* Offline register self-test.  For each register in the table, verify
 * that the writable bits (rw_mask) can be cleared and set, and that
 * the read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in every case, pass or fail.
 *
 * Returns 0 if all registers behave as expected, -ENODEV on the first
 * misbehaving register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16   offset;	/* register offset from the BAR */
		u16   flags;	/* currently unused */
		u32   rw_mask;	/* bits expected to be writable */
		u32   ro_mask;	/* bits expected to be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: no writable bit may stay set, and no
		 * read-only bit may change.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: every writable bit must read back set,
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3876
3877 static int
3878 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3879 {
3880         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3881                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3882         int i;
3883
3884         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3885                 u32 offset;
3886
3887                 for (offset = 0; offset < size; offset += 4) {
3888
3889                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3890
3891                         if (REG_RD_IND(bp, start + offset) !=
3892                                 test_pattern[i]) {
3893                                 return -ENODEV;
3894                         }
3895                 }
3896         }
3897         return 0;
3898 }
3899
3900 static int
3901 bnx2_test_memory(struct bnx2 *bp)
3902 {
3903         int ret = 0;
3904         int i;
3905         static const struct {
3906                 u32   offset;
3907                 u32   len;
3908         } mem_tbl[] = {
3909                 { 0x60000,  0x4000 },
3910                 { 0xa0000,  0x3000 },
3911                 { 0xe0000,  0x4000 },
3912                 { 0x120000, 0x4000 },
3913                 { 0x1a0000, 0x4000 },
3914                 { 0x160000, 0x4000 },
3915                 { 0xffffffff, 0    },
3916         };
3917
3918         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3919                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3920                         mem_tbl[i].len)) != 0) {
3921                         return ret;
3922                 }
3923         }
3924         
3925         return ret;
3926 }
3927
3928 #define BNX2_MAC_LOOPBACK       0
3929 #define BNX2_PHY_LOOPBACK       1
3930
/* Send a single frame through the selected loopback path and verify it
 * comes back intact.
 *
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK.
 *
 * Returns 0 if the frame was transmitted, received, and matched
 * byte-for-byte; -EINVAL for an unknown mode; -ENOMEM if the test skb
 * cannot be allocated; -ENODEV on any data-path failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = 0;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a full-sized test frame: our own MAC as destination,
	 * zeroed source/type, then an incrementing byte pattern.
	 */
	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (without an interrupt) so we have
	 * a stable starting RX index to compare against later.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the frame as a single-BD transmit. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the TX completion
	 * and the looped-back receive.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The transmit must have completed... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly num_pkts frames must have arrived. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip prepends an l2_fhdr to the received frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive errors. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match, excluding the 4-byte CRC appended by HW. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Payload must match the pattern we transmitted. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4049
4050 #define BNX2_MAC_LOOPBACK_FAILED        1
4051 #define BNX2_PHY_LOOPBACK_FAILED        2
4052 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4053                                          BNX2_PHY_LOOPBACK_FAILED)
4054
4055 static int
4056 bnx2_test_loopback(struct bnx2 *bp)
4057 {
4058         int rc = 0;
4059
4060         if (!netif_running(bp->dev))
4061                 return BNX2_LOOPBACK_FAILED;
4062
4063         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4064         spin_lock_bh(&bp->phy_lock);
4065         bnx2_init_phy(bp);
4066         spin_unlock_bh(&bp->phy_lock);
4067         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4068                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4069         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4070                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4071         return rc;
4072 }
4073
4074 #define NVRAM_SIZE 0x200
4075 #define CRC32_RESIDUAL 0xdebb20e3
4076
4077 static int
4078 bnx2_test_nvram(struct bnx2 *bp)
4079 {
4080         u32 buf[NVRAM_SIZE / 4];
4081         u8 *data = (u8 *) buf;
4082         int rc = 0;
4083         u32 magic, csum;
4084
4085         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4086                 goto test_nvram_done;
4087
4088         magic = be32_to_cpu(buf[0]);
4089         if (magic != 0x669955aa) {
4090                 rc = -ENODEV;
4091                 goto test_nvram_done;
4092         }
4093
4094         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4095                 goto test_nvram_done;
4096
4097         csum = ether_crc_le(0x100, data);
4098         if (csum != CRC32_RESIDUAL) {
4099                 rc = -ENODEV;
4100                 goto test_nvram_done;
4101         }
4102
4103         csum = ether_crc_le(0x100, data + 0x100);
4104         if (csum != CRC32_RESIDUAL) {
4105                 rc = -ENODEV;
4106         }
4107
4108 test_nvram_done:
4109         return rc;
4110 }
4111
4112 static int
4113 bnx2_test_link(struct bnx2 *bp)
4114 {
4115         u32 bmsr;
4116
4117         spin_lock_bh(&bp->phy_lock);
4118         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4119         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4120         spin_unlock_bh(&bp->phy_lock);
4121                 
4122         if (bmsr & BMSR_LSTATUS) {
4123                 return 0;
4124         }
4125         return -ENODEV;
4126 }
4127
4128 static int
4129 bnx2_test_intr(struct bnx2 *bp)
4130 {
4131         int i;
4132         u16 status_idx;
4133
4134         if (!netif_running(bp->dev))
4135                 return -ENODEV;
4136
4137         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4138
4139         /* This register is not touched during run-time. */
4140         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4141         REG_RD(bp, BNX2_HC_COMMAND);
4142
4143         for (i = 0; i < 10; i++) {
4144                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4145                         status_idx) {
4146
4147                         break;
4148                 }
4149
4150                 msleep_interruptible(10);
4151         }
4152         if (i < 10)
4153                 return 0;
4154
4155         return -ENODEV;
4156 }
4157
/* Periodic driver timer.  Sends the keep-alive pulse to the firmware,
 * snapshots the firmware RX-drop counter, and on 5706 SerDes parts
 * runs the parallel-detect link workaround.  Always reschedules itself
 * at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip this tick while interrupts are masked (e.g. during a
	 * reset), but keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat: tell the firmware the driver is still running. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* Autoneg recently restarted; give it time. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* NOTE(review): 0x1c/0x17/0x15 accesses below
				 * are vendor-specific shadow/expansion
				 * registers; exact semantics are not visible
				 * here beyond the bit comments.
				 */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Signal present but the partner is
					 * not autonegotiating: force 1000
					 * full duplex (parallel detect).
					 */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			(bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Link was forced via parallel detect; if the
			 * partner now sends CONFIG, go back to autoneg.
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4238
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* Bring the chip to full power and mask interrupts before any
	 * setup work is done.
	 */
	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Only try MSI on chips newer than 5706 A0/A1, and only if the
	 * user has not disabled it via the disable_msi module parameter.
	 * MSI interrupts are never shared, hence no SA_SHIRQ there.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order of the setup above. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* The chip must be reinitialized after switching
			 * interrupt modes.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4334
/* Workqueue handler scheduled by bnx2_tx_timeout(): quiesce the
 * interface, reinitialize the chip, and restart.
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	/* The device may have been closed between scheduling and
	 * execution; bnx2_close() spins on in_reset_task, which is only
	 * set after this check.
	 */
	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	/* NOTE(review): the return value of bnx2_init_nic() is ignored;
	 * a failed re-init would leave the device half-initialized.
	 * TODO confirm whether an error path is warranted here.
	 */
	bnx2_init_nic(bp);

	/* Keep the interrupt handler held off until bnx2_netif_start()
	 * re-enables interrupts.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4352
4353 static void
4354 bnx2_tx_timeout(struct net_device *dev)
4355 {
4356         struct bnx2 *bp = netdev_priv(dev);
4357
4358         /* This allows the netif to be shutdown gracefully before resetting */
4359         schedule_work(&bp->reset_task);
4360 }
4361
4362 #ifdef BCM_VLAN
4363 /* Called with rtnl_lock */
4364 static void
4365 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4366 {
4367         struct bnx2 *bp = netdev_priv(dev);
4368
4369         bnx2_netif_stop(bp);
4370
4371         bp->vlgrp = vlgrp;
4372         bnx2_set_rx_mode(dev);
4373
4374         bnx2_netif_start(bp);
4375 }
4376
4377 /* Called with rtnl_lock */
4378 static void
4379 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4380 {
4381         struct bnx2 *bp = netdev_priv(dev);
4382
4383         bnx2_netif_stop(bp);
4384
4385         if (bp->vlgrp)
4386                 bp->vlgrp->vlan_devices[vid] = NULL;
4387         bnx2_set_rx_mode(dev);
4388
4389         bnx2_netif_start(bp);
4390 }
4391 #endif
4392
4393 /* Called with netif_tx_lock.
4394  * hard_start_xmit is pseudo-lockless - a lock is only required when
4395  * the tx queue is full. This way, we get the benefit of lockless
4396  * operations most of the time without the complexities to handle
4397  * netif_stop_queue/wake_queue race conditions.
4398  */
4399 static int
4400 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4401 {
4402         struct bnx2 *bp = netdev_priv(dev);
4403         dma_addr_t mapping;
4404         struct tx_bd *txbd;
4405         struct sw_bd *tx_buf;
4406         u32 len, vlan_tag_flags, last_frag, mss;
4407         u16 prod, ring_prod;
4408         int i;
4409
4410         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4411                 netif_stop_queue(dev);
4412                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4413                         dev->name);
4414
4415                 return NETDEV_TX_BUSY;
4416         }
4417         len = skb_headlen(skb);
4418         prod = bp->tx_prod;
4419         ring_prod = TX_RING_IDX(prod);
4420
4421         vlan_tag_flags = 0;
4422         if (skb->ip_summed == CHECKSUM_HW) {
4423                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4424         }
4425
4426         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4427                 vlan_tag_flags |=
4428                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4429         }
4430 #ifdef BCM_TSO 
4431         if ((mss = skb_shinfo(skb)->gso_size) &&
4432                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4433                 u32 tcp_opt_len, ip_tcp_len;
4434
4435                 if (skb_header_cloned(skb) &&
4436                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4437                         dev_kfree_skb(skb);
4438                         return NETDEV_TX_OK;
4439                 }
4440
4441                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4442                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4443
4444                 tcp_opt_len = 0;
4445                 if (skb->h.th->doff > 5) {
4446                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4447                 }
4448                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4449
4450                 skb->nh.iph->check = 0;
4451                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4452                 skb->h.th->check =
4453                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4454                                             skb->nh.iph->daddr,
4455                                             0, IPPROTO_TCP, 0);
4456
4457                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4458                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4459                                 (tcp_opt_len >> 2)) << 8;
4460                 }
4461         }
4462         else
4463 #endif
4464         {
4465                 mss = 0;
4466         }
4467
4468         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4469         
4470         tx_buf = &bp->tx_buf_ring[ring_prod];
4471         tx_buf->skb = skb;
4472         pci_unmap_addr_set(tx_buf, mapping, mapping);
4473
4474         txbd = &bp->tx_desc_ring[ring_prod];
4475
4476         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4477         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4478         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4479         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4480
4481         last_frag = skb_shinfo(skb)->nr_frags;
4482
4483         for (i = 0; i < last_frag; i++) {
4484                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4485
4486                 prod = NEXT_TX_BD(prod);
4487                 ring_prod = TX_RING_IDX(prod);
4488                 txbd = &bp->tx_desc_ring[ring_prod];
4489
4490                 len = frag->size;
4491                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4492                         len, PCI_DMA_TODEVICE);
4493                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4494                                 mapping, mapping);
4495
4496                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4497                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4498                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4499                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4500
4501         }
4502         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4503
4504         prod = NEXT_TX_BD(prod);
4505         bp->tx_prod_bseq += skb->len;
4506
4507         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4508         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4509
4510         mmiowb();
4511
4512         bp->tx_prod = prod;
4513         dev->trans_start = jiffies;
4514
4515         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4516                 spin_lock(&bp->tx_lock);
4517                 netif_stop_queue(dev);
4518                 
4519                 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4520                         netif_wake_queue(dev);
4521                 spin_unlock(&bp->tx_lock);
4522         }
4523
4524         return NETDEV_TX_OK;
4525 }
4526
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload message according to the wake-on-LAN
	 * configuration so the chip is left in the right power state.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4562
/* Combine a 64-bit hardware counter split into _hi/_lo 32-bit words.
 * Only selected when unsigned long is 64 bits wide (see below), so the
 * 32-bit left shift of the _hi word stays within the type.  The whole
 * expansion is parenthesized so the macro is safe inside larger
 * expressions (e.g. multiplied or compared without surprises).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low word is reported, avoiding a shift by
 * the full width of unsigned long (which would be undefined).
 */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4575
/* net_device get_stats hook: translate the chip's DMA'd statistics
 * block into the generic struct net_device_stats counters.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No statistics block allocated: return the cached counters
	 * unchanged.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* Packet/byte counters are 64-bit (_hi/_lo pairs) in the stats
	 * block; GET_NET_STATS reports all 64 bits on 64-bit hosts and
	 * the low 32 bits otherwise (see the macros above).
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is a derived aggregate of the categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are reported as 0 on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Frames dropped for lack of rx buffers, plus firmware drops. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4651
4652 /* All ethtool functions called with rtnl_lock */
4653
4654 static int
4655 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4656 {
4657         struct bnx2 *bp = netdev_priv(dev);
4658
4659         cmd->supported = SUPPORTED_Autoneg;
4660         if (bp->phy_flags & PHY_SERDES_FLAG) {
4661                 cmd->supported |= SUPPORTED_1000baseT_Full |
4662                         SUPPORTED_FIBRE;
4663
4664                 cmd->port = PORT_FIBRE;
4665         }
4666         else {
4667                 cmd->supported |= SUPPORTED_10baseT_Half |
4668                         SUPPORTED_10baseT_Full |
4669                         SUPPORTED_100baseT_Half |
4670                         SUPPORTED_100baseT_Full |
4671                         SUPPORTED_1000baseT_Full |
4672                         SUPPORTED_TP;
4673
4674                 cmd->port = PORT_TP;
4675         }
4676
4677         cmd->advertising = bp->advertising;
4678
4679         if (bp->autoneg & AUTONEG_SPEED) {
4680                 cmd->autoneg = AUTONEG_ENABLE;
4681         }
4682         else {
4683                 cmd->autoneg = AUTONEG_DISABLE;
4684         }
4685
4686         if (netif_carrier_ok(dev)) {
4687                 cmd->speed = bp->line_speed;
4688                 cmd->duplex = bp->duplex;
4689         }
4690         else {
4691                 cmd->speed = -1;
4692                 cmd->duplex = -1;
4693         }
4694
4695         cmd->transceiver = XCVR_INTERNAL;
4696         cmd->phy_address = bp->phy_addr;
4697
4698         return 0;
4699 }
4700   
/* ethtool set_settings: validate and apply a new speed/duplex/autoneg
 * configuration, then reprogram the PHY.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed until the whole
	 * request has been validated.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are rejected on a fiber PHY. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is never accepted. */
			return -EINVAL;
		}
		else {
			/* Anything else: fall back to advertising every
			 * mode the fitted PHY type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: fiber only accepts 1000/full; copper
		 * forced mode cannot be 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY under
	 * phy_lock.
	 */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4772
4773 static void
4774 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4775 {
4776         struct bnx2 *bp = netdev_priv(dev);
4777
4778         strcpy(info->driver, DRV_MODULE_NAME);
4779         strcpy(info->version, DRV_MODULE_VERSION);
4780         strcpy(info->bus_info, pci_name(bp->pdev));
4781         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4782         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4783         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4784         info->fw_version[1] = info->fw_version[3] = '.';
4785         info->fw_version[5] = 0;
4786 }
4787
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len: tell the core how big a dump buffer to
 * allocate before calling bnx2_get_regs().
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4795
4796 static void
4797 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4798 {
4799         u32 *p = _p, i, offset;
4800         u8 *orig_p = _p;
4801         struct bnx2 *bp = netdev_priv(dev);
4802         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4803                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4804                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4805                                  0x1040, 0x1048, 0x1080, 0x10a4,
4806                                  0x1400, 0x1490, 0x1498, 0x14f0,
4807                                  0x1500, 0x155c, 0x1580, 0x15dc,
4808                                  0x1600, 0x1658, 0x1680, 0x16d8,
4809                                  0x1800, 0x1820, 0x1840, 0x1854,
4810                                  0x1880, 0x1894, 0x1900, 0x1984,
4811                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4812                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4813                                  0x2000, 0x2030, 0x23c0, 0x2400,
4814                                  0x2800, 0x2820, 0x2830, 0x2850,
4815                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4816                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4817                                  0x4080, 0x4090, 0x43c0, 0x4458,
4818                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4819                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4820                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4821                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4822                                  0x6800, 0x6848, 0x684c, 0x6860,
4823                                  0x6888, 0x6910, 0x8000 };
4824
4825         regs->version = 0;
4826
4827         memset(p, 0, BNX2_REGDUMP_LEN);
4828
4829         if (!netif_running(bp->dev))
4830                 return;
4831
4832         i = 0;
4833         offset = reg_boundaries[0];
4834         p += offset;
4835         while (offset < BNX2_REGDUMP_LEN) {
4836                 *p++ = REG_RD(bp, offset);
4837                 offset += 4;
4838                 if (offset == reg_boundaries[i + 1]) {
4839                         offset = reg_boundaries[i + 2];
4840                         p = (u32 *) (orig_p + offset);
4841                         i += 2;
4842                 }
4843         }
4844 }
4845
4846 static void
4847 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4848 {
4849         struct bnx2 *bp = netdev_priv(dev);
4850
4851         if (bp->flags & NO_WOL_FLAG) {
4852                 wol->supported = 0;
4853                 wol->wolopts = 0;
4854         }
4855         else {
4856                 wol->supported = WAKE_MAGIC;
4857                 if (bp->wol)
4858                         wol->wolopts = WAKE_MAGIC;
4859                 else
4860                         wol->wolopts = 0;
4861         }
4862         memset(&wol->sopass, 0, sizeof(wol->sopass));
4863 }
4864
4865 static int
4866 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4867 {
4868         struct bnx2 *bp = netdev_priv(dev);
4869
4870         if (wol->wolopts & ~WAKE_MAGIC)
4871                 return -EINVAL;
4872
4873         if (wol->wolopts & WAKE_MAGIC) {
4874                 if (bp->flags & NO_WOL_FLAG)
4875                         return -EINVAL;
4876
4877                 bp->wol = 1;
4878         }
4879         else {
4880                 bp->wol = 0;
4881         }
4882         return 0;
4883 }
4884
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autonegotiation is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; re-taken below. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		/* On 5706 SerDes, re-arm the autoneg timeout handling
		 * in the driver timer.
		 */
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4920
4921 static int
4922 bnx2_get_eeprom_len(struct net_device *dev)
4923 {
4924         struct bnx2 *bp = netdev_priv(dev);
4925
4926         if (bp->flash_info == NULL)
4927                 return 0;
4928
4929         return (int) bp->flash_size;
4930 }
4931
4932 static int
4933 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4934                 u8 *eebuf)
4935 {
4936         struct bnx2 *bp = netdev_priv(dev);
4937         int rc;
4938
4939         /* parameters already validated in ethtool_get_eeprom */
4940
4941         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4942
4943         return rc;
4944 }
4945
4946 static int
4947 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4948                 u8 *eebuf)
4949 {
4950         struct bnx2 *bp = netdev_priv(dev);
4951         int rc;
4952
4953         /* parameters already validated in ethtool_set_eeprom */
4954
4955         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4956
4957         return rc;
4958 }
4959
4960 static int
4961 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4962 {
4963         struct bnx2 *bp = netdev_priv(dev);
4964
4965         memset(coal, 0, sizeof(struct ethtool_coalesce));
4966
4967         coal->rx_coalesce_usecs = bp->rx_ticks;
4968         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4969         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4970         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4971
4972         coal->tx_coalesce_usecs = bp->tx_ticks;
4973         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4974         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4975         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4976
4977         coal->stats_block_coalesce_usecs = bp->stats_ticks;
4978
4979         return 0;
4980 }
4981
4982 static int
4983 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4984 {
4985         struct bnx2 *bp = netdev_priv(dev);
4986
4987         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4988         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4989
4990         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
4991         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4992
4993         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4994         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4995
4996         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4997         if (bp->rx_quick_cons_trip_int > 0xff)
4998                 bp->rx_quick_cons_trip_int = 0xff;
4999
5000         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5001         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5002
5003         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5004         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5005
5006         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5007         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5008
5009         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5010         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5011                 0xff;
5012
5013         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5014         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5015         bp->stats_ticks &= 0xffff00;
5016
5017         if (netif_running(bp->dev)) {
5018                 bnx2_netif_stop(bp);
5019                 bnx2_init_nic(bp);
5020                 bnx2_netif_start(bp);
5021         }
5022
5023         return 0;
5024 }
5025
5026 static void
5027 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5028 {
5029         struct bnx2 *bp = netdev_priv(dev);
5030
5031         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5032         ering->rx_mini_max_pending = 0;
5033         ering->rx_jumbo_max_pending = 0;
5034
5035         ering->rx_pending = bp->rx_ring_size;
5036         ering->rx_mini_pending = 0;
5037         ering->rx_jumbo_pending = 0;
5038
5039         ering->tx_max_pending = MAX_TX_DESC_CNT;
5040         ering->tx_pending = bp->tx_ring_size;
5041 }
5042
/* ethtool set_ringparam: resize the rx/tx rings.  Requires a full
 * chip reset when the device is running.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The tx ring must at least be able to hold one maximally
	 * fragmented skb (one descriptor per fragment plus the head).
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Quiesce, reset the chip, and release the old rings... */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	/* ...then reallocate and restart with the new sizes. */
	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no rings and only the error code is
		 * returned; TODO confirm whether recovery is needed.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5076
5077 static void
5078 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5079 {
5080         struct bnx2 *bp = netdev_priv(dev);
5081
5082         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5083         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5084         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5085 }
5086
5087 static int
5088 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5089 {
5090         struct bnx2 *bp = netdev_priv(dev);
5091
5092         bp->req_flow_ctrl = 0;
5093         if (epause->rx_pause)
5094                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5095         if (epause->tx_pause)
5096                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5097
5098         if (epause->autoneg) {
5099                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5100         }
5101         else {
5102                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5103         }
5104
5105         spin_lock_bh(&bp->phy_lock);
5106
5107         bnx2_setup_phy(bp);
5108
5109         spin_unlock_bh(&bp->phy_lock);
5110
5111         return 0;
5112 }
5113
5114 static u32
5115 bnx2_get_rx_csum(struct net_device *dev)
5116 {
5117         struct bnx2 *bp = netdev_priv(dev);
5118
5119         return bp->rx_csum;
5120 }
5121
/* ethtool set_rx_csum: record the new rx checksum offload setting. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5130
5131 static int
5132 bnx2_set_tso(struct net_device *dev, u32 data)
5133 {
5134         if (data)
5135                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5136         else
5137                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5138         return 0;
5139 }
5140
/* Number of entries in the ethtool statistics tables below.  The
 * string table and bnx2_stats_offset_arr are parallel arrays: entry i
 * in each describes the same counter, so their order must match.
 */
#define BNX2_NUM_STATS 46

/* ethtool statistics names, reported by ETHTOOL_GSTRINGS. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5193
/* 32-bit word offset of a field within the hardware statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offsets into bp->stats_blk for each counter in
 * bnx2_stats_str_arr; 64-bit counters point at their _hi word. */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5244
/* Per-counter width codes for 5706 A0-A2 / 5708 A0:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skip.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5255
/* Width codes for later steppings: only stat_IfHCInBadOctets
 * (index 1) is still skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5263
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; the index of each entry matches the
 * result slot filled in by bnx2_self_test(). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5276
/* ethtool self_test_count hook: number of self-test result slots. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5282
/* ethtool self_test hook.  Runs the online tests (nvram, interrupt,
 * link) always; the offline tests (registers, memory, loopback)
 * only when ETH_TEST_FL_OFFLINE is requested, since they reset the
 * chip and drop traffic.  A non-zero buf[i] marks test i as failed
 * and ETH_TEST_FL_FAILED is set in etest->flags. */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Quiesce the NIC and put it in diagnostic mode before
		 * poking registers/memory. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is reported directly in buf[2]. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or leave the chip reset if
		 * the interface is down). */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		/* Copper PHYs can take longer to renegotiate. */
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5334
5335 static void
5336 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5337 {
5338         switch (stringset) {
5339         case ETH_SS_STATS:
5340                 memcpy(buf, bnx2_stats_str_arr,
5341                         sizeof(bnx2_stats_str_arr));
5342                 break;
5343         case ETH_SS_TEST:
5344                 memcpy(buf, bnx2_tests_str_arr,
5345                         sizeof(bnx2_tests_str_arr));
5346                 break;
5347         }
5348 }
5349
/* ethtool get_stats_count hook: number of statistics reported. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5355
5356 static void
5357 bnx2_get_ethtool_stats(struct net_device *dev,
5358                 struct ethtool_stats *stats, u64 *buf)
5359 {
5360         struct bnx2 *bp = netdev_priv(dev);
5361         int i;
5362         u32 *hw_stats = (u32 *) bp->stats_blk;
5363         u8 *stats_len_arr = NULL;
5364
5365         if (hw_stats == NULL) {
5366                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5367                 return;
5368         }
5369
5370         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5371             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5372             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5373             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5374                 stats_len_arr = bnx2_5706_stats_len_arr;
5375         else
5376                 stats_len_arr = bnx2_5708_stats_len_arr;
5377
5378         for (i = 0; i < BNX2_NUM_STATS; i++) {
5379                 if (stats_len_arr[i] == 0) {
5380                         /* skip this counter */
5381                         buf[i] = 0;
5382                         continue;
5383                 }
5384                 if (stats_len_arr[i] == 4) {
5385                         /* 4-byte counter */
5386                         buf[i] = (u64)
5387                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5388                         continue;
5389                 }
5390                 /* 8-byte counter */
5391                 buf[i] = (((u64) *(hw_stats +
5392                                         bnx2_stats_offset_arr[i])) << 32) +
5393                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5394         }
5395 }
5396
5397 static int
5398 bnx2_phys_id(struct net_device *dev, u32 data)
5399 {
5400         struct bnx2 *bp = netdev_priv(dev);
5401         int i;
5402         u32 save;
5403
5404         if (data == 0)
5405                 data = 2;
5406
5407         save = REG_RD(bp, BNX2_MISC_CFG);
5408         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5409
5410         for (i = 0; i < (data * 2); i++) {
5411                 if ((i % 2) == 0) {
5412                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5413                 }
5414                 else {
5415                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5416                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5417                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5418                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5419                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5420                                 BNX2_EMAC_LED_TRAFFIC);
5421                 }
5422                 msleep_interruptible(500);
5423                 if (signal_pending(current))
5424                         break;
5425         }
5426         REG_WR(bp, BNX2_EMAC_LED, 0);
5427         REG_WR(bp, BNX2_MISC_CFG, save);
5428         return 0;
5429 }
5430
/* ethtool entry points; operations with no device-specific state use
 * the generic ethtool_op_* helpers. */
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5468
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Called with rtnl_lock held; PHY register accesses are serialized
 * with bp->phy_lock.  Unknown commands return -EOPNOTSUPP. */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* PHY register writes require admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5510
5511 /* Called with rtnl_lock */
5512 static int
5513 bnx2_change_mac_addr(struct net_device *dev, void *p)
5514 {
5515         struct sockaddr *addr = p;
5516         struct bnx2 *bp = netdev_priv(dev);
5517
5518         if (!is_valid_ether_addr(addr->sa_data))
5519                 return -EINVAL;
5520
5521         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5522         if (netif_running(dev))
5523                 bnx2_set_mac_addr(bp);
5524
5525         return 0;
5526 }
5527
5528 /* Called with rtnl_lock */
5529 static int
5530 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5531 {
5532         struct bnx2 *bp = netdev_priv(dev);
5533
5534         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5535                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5536                 return -EINVAL;
5537
5538         dev->mtu = new_mtu;
5539         if (netif_running(dev)) {
5540                 bnx2_netif_stop(bp);
5541
5542                 bnx2_init_nic(bp);
5543
5544                 bnx2_netif_start(bp);
5545         }
5546         return 0;
5547 }
5548
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the NIC's IRQ line
 * disabled so netconsole and friends can make progress without
 * relying on interrupt delivery. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
5560
5561 static int __devinit
5562 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5563 {
5564         struct bnx2 *bp;
5565         unsigned long mem_len;
5566         int rc;
5567         u32 reg;
5568
5569         SET_MODULE_OWNER(dev);
5570         SET_NETDEV_DEV(dev, &pdev->dev);
5571         bp = netdev_priv(dev);
5572
5573         bp->flags = 0;
5574         bp->phy_flags = 0;
5575
5576         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5577         rc = pci_enable_device(pdev);
5578         if (rc) {
5579                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5580                 goto err_out;
5581         }
5582
5583         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5584                 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5585                        "aborting.\n");
5586                 rc = -ENODEV;
5587                 goto err_out_disable;
5588         }
5589
5590         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5591         if (rc) {
5592                 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5593                 goto err_out_disable;
5594         }
5595
5596         pci_set_master(pdev);
5597
5598         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5599         if (bp->pm_cap == 0) {
5600                 printk(KERN_ERR PFX "Cannot find power management capability, "
5601                                "aborting.\n");
5602                 rc = -EIO;
5603                 goto err_out_release;
5604         }
5605
5606         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5607         if (bp->pcix_cap == 0) {
5608                 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5609                 rc = -EIO;
5610                 goto err_out_release;
5611         }
5612
5613         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5614                 bp->flags |= USING_DAC_FLAG;
5615                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5616                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5617                                "failed, aborting.\n");
5618                         rc = -EIO;
5619                         goto err_out_release;
5620                 }
5621         }
5622         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5623                 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5624                 rc = -EIO;
5625                 goto err_out_release;
5626         }
5627
5628         bp->dev = dev;
5629         bp->pdev = pdev;
5630
5631         spin_lock_init(&bp->phy_lock);
5632         spin_lock_init(&bp->tx_lock);
5633         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5634
5635         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5636         mem_len = MB_GET_CID_ADDR(17);
5637         dev->mem_end = dev->mem_start + mem_len;
5638         dev->irq = pdev->irq;
5639
5640         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5641
5642         if (!bp->regview) {
5643                 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5644                 rc = -ENOMEM;
5645                 goto err_out_release;
5646         }
5647
5648         /* Configure byte swap and enable write to the reg_window registers.
5649          * Rely on CPU to do target byte swapping on big endian systems
5650          * The chip's target access swapping will not swap all accesses
5651          */
5652         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5653                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5654                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5655
5656         bnx2_set_power_state(bp, PCI_D0);
5657
5658         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5659
5660         /* Get bus information. */
5661         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5662         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5663                 u32 clkreg;
5664
5665                 bp->flags |= PCIX_FLAG;
5666
5667                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5668                 
5669                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5670                 switch (clkreg) {
5671                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5672                         bp->bus_speed_mhz = 133;
5673                         break;
5674
5675                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5676                         bp->bus_speed_mhz = 100;
5677                         break;
5678
5679                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5680                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5681                         bp->bus_speed_mhz = 66;
5682                         break;
5683
5684                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5685                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5686                         bp->bus_speed_mhz = 50;
5687                         break;
5688
5689                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5690                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5691                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5692                         bp->bus_speed_mhz = 33;
5693                         break;
5694                 }
5695         }
5696         else {
5697                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5698                         bp->bus_speed_mhz = 66;
5699                 else
5700                         bp->bus_speed_mhz = 33;
5701         }
5702
5703         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5704                 bp->flags |= PCI_32BIT_FLAG;
5705
5706         /* 5706A0 may falsely detect SERR and PERR. */
5707         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5708                 reg = REG_RD(bp, PCI_COMMAND);
5709                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5710                 REG_WR(bp, PCI_COMMAND, reg);
5711         }
5712         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5713                 !(bp->flags & PCIX_FLAG)) {
5714
5715                 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5716                        "aborting.\n");
5717                 goto err_out_unmap;
5718         }
5719
5720         bnx2_init_nvram(bp);
5721
5722         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5723
5724         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5725             BNX2_SHM_HDR_SIGNATURE_SIG)
5726                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5727         else
5728                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5729
5730         /* Get the permanent MAC address.  First we need to make sure the
5731          * firmware is actually running.
5732          */
5733         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5734
5735         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5736             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5737                 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5738                 rc = -ENODEV;
5739                 goto err_out_unmap;
5740         }
5741
5742         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5743
5744         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5745         bp->mac_addr[0] = (u8) (reg >> 8);
5746         bp->mac_addr[1] = (u8) reg;
5747
5748         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5749         bp->mac_addr[2] = (u8) (reg >> 24);
5750         bp->mac_addr[3] = (u8) (reg >> 16);
5751         bp->mac_addr[4] = (u8) (reg >> 8);
5752         bp->mac_addr[5] = (u8) reg;
5753
5754         bp->tx_ring_size = MAX_TX_DESC_CNT;
5755         bnx2_set_rx_ring_size(bp, 100);
5756
5757         bp->rx_csum = 1;
5758
5759         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5760
5761         bp->tx_quick_cons_trip_int = 20;
5762         bp->tx_quick_cons_trip = 20;
5763         bp->tx_ticks_int = 80;
5764         bp->tx_ticks = 80;
5765                 
5766         bp->rx_quick_cons_trip_int = 6;
5767         bp->rx_quick_cons_trip = 6;
5768         bp->rx_ticks_int = 18;
5769         bp->rx_ticks = 18;
5770
5771         bp->stats_ticks = 1000000 & 0xffff00;
5772
5773         bp->timer_interval =  HZ;
5774         bp->current_interval =  HZ;
5775
5776         bp->phy_addr = 1;
5777
5778         /* Disable WOL support if we are running on a SERDES chip. */
5779         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5780                 bp->phy_flags |= PHY_SERDES_FLAG;
5781                 bp->flags |= NO_WOL_FLAG;
5782                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5783                         bp->phy_addr = 2;
5784                         reg = REG_RD_IND(bp, bp->shmem_base +
5785                                          BNX2_SHARED_HW_CFG_CONFIG);
5786                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5787                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5788                 }
5789         }
5790
5791         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5792             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5793             (CHIP_ID(bp) == CHIP_ID_5708_B1))
5794                 bp->flags |= NO_WOL_FLAG;
5795
5796         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5797                 bp->tx_quick_cons_trip_int =
5798                         bp->tx_quick_cons_trip;
5799                 bp->tx_ticks_int = bp->tx_ticks;
5800                 bp->rx_quick_cons_trip_int =
5801                         bp->rx_quick_cons_trip;
5802                 bp->rx_ticks_int = bp->rx_ticks;
5803                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5804                 bp->com_ticks_int = bp->com_ticks;
5805                 bp->cmd_ticks_int = bp->cmd_ticks;
5806         }
5807
5808         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5809         bp->req_line_speed = 0;
5810         if (bp->phy_flags & PHY_SERDES_FLAG) {
5811                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5812
5813                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5814                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5815                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5816                         bp->autoneg = 0;
5817                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5818                         bp->req_duplex = DUPLEX_FULL;
5819                 }
5820         }
5821         else {
5822                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5823         }
5824
5825         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5826
5827         init_timer(&bp->timer);
5828         bp->timer.expires = RUN_AT(bp->timer_interval);
5829         bp->timer.data = (unsigned long) bp;
5830         bp->timer.function = bnx2_timer;
5831
5832         return 0;
5833
5834 err_out_unmap:
5835         if (bp->regview) {
5836                 iounmap(bp->regview);
5837                 bp->regview = NULL;
5838         }
5839
5840 err_out_release:
5841         pci_release_regions(pdev);
5842
5843 err_out_disable:
5844         pci_disable_device(pdev);
5845         pci_set_drvdata(pdev, NULL);
5846
5847 err_out:
5848         return rc;
5849 }
5850
5851 static int __devinit
5852 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5853 {
5854         static int version_printed = 0;
5855         struct net_device *dev = NULL;
5856         struct bnx2 *bp;
5857         int rc, i;
5858
5859         if (version_printed++ == 0)
5860                 printk(KERN_INFO "%s", version);
5861
5862         /* dev zeroed in init_etherdev */
5863         dev = alloc_etherdev(sizeof(*bp));
5864
5865         if (!dev)
5866                 return -ENOMEM;
5867
5868         rc = bnx2_init_board(pdev, dev);
5869         if (rc < 0) {
5870                 free_netdev(dev);
5871                 return rc;
5872         }
5873
5874         dev->open = bnx2_open;
5875         dev->hard_start_xmit = bnx2_start_xmit;
5876         dev->stop = bnx2_close;
5877         dev->get_stats = bnx2_get_stats;
5878         dev->set_multicast_list = bnx2_set_rx_mode;
5879         dev->do_ioctl = bnx2_ioctl;
5880         dev->set_mac_address = bnx2_change_mac_addr;
5881         dev->change_mtu = bnx2_change_mtu;
5882         dev->tx_timeout = bnx2_tx_timeout;
5883         dev->watchdog_timeo = TX_TIMEOUT;
5884 #ifdef BCM_VLAN
5885         dev->vlan_rx_register = bnx2_vlan_rx_register;
5886         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5887 #endif
5888         dev->poll = bnx2_poll;
5889         dev->ethtool_ops = &bnx2_ethtool_ops;
5890         dev->weight = 64;
5891
5892         bp = netdev_priv(dev);
5893
5894 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5895         dev->poll_controller = poll_bnx2;
5896 #endif
5897
5898         if ((rc = register_netdev(dev))) {
5899                 printk(KERN_ERR PFX "Cannot register net device\n");
5900                 if (bp->regview)
5901                         iounmap(bp->regview);
5902                 pci_release_regions(pdev);
5903                 pci_disable_device(pdev);
5904                 pci_set_drvdata(pdev, NULL);
5905                 free_netdev(dev);
5906                 return rc;
5907         }
5908
5909         pci_set_drvdata(pdev, dev);
5910
5911         memcpy(dev->dev_addr, bp->mac_addr, 6);
5912         memcpy(dev->perm_addr, bp->mac_addr, 6);
5913         bp->name = board_info[ent->driver_data].name,
5914         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5915                 "IRQ %d, ",
5916                 dev->name,
5917                 bp->name,
5918                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5919                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5920                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5921                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5922                 bp->bus_speed_mhz,
5923                 dev->base_addr,
5924                 bp->pdev->irq);
5925
5926         printk("node addr ");
5927         for (i = 0; i < 6; i++)
5928                 printk("%2.2x", dev->dev_addr[i]);
5929         printk("\n");
5930
5931         dev->features |= NETIF_F_SG;
5932         if (bp->flags & USING_DAC_FLAG)
5933                 dev->features |= NETIF_F_HIGHDMA;
5934         dev->features |= NETIF_F_IP_CSUM;
5935 #ifdef BCM_VLAN
5936         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5937 #endif
5938 #ifdef BCM_TSO
5939         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5940 #endif
5941
5942         netif_carrier_off(bp->dev);
5943
5944         return 0;
5945 }
5946
/* PCI remove entry point: drain pending work, unregister the
 * interface, and release everything acquired in bnx2_init_board(). */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* make sure the reset task is not still queued */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
5965
5966 static int
5967 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5968 {
5969         struct net_device *dev = pci_get_drvdata(pdev);
5970         struct bnx2 *bp = netdev_priv(dev);
5971         u32 reset_code;
5972
5973         if (!netif_running(dev))
5974                 return 0;
5975
5976         flush_scheduled_work();
5977         bnx2_netif_stop(bp);
5978         netif_device_detach(dev);
5979         del_timer_sync(&bp->timer);
5980         if (bp->flags & NO_WOL_FLAG)
5981                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5982         else if (bp->wol)
5983                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5984         else
5985                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5986         bnx2_reset_chip(bp, reset_code);
5987         bnx2_free_skbs(bp);
5988         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5989         return 0;
5990 }
5991
/* PM resume hook: restore full power and re-initialize a running
 * interface; a downed interface needs nothing. */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6007
/* PCI driver glue: probe/remove plus power-management callbacks. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6016
/* Module load: register the PCI driver. */
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}
6021
/* Module unload: unregister the PCI driver (triggers remove_one). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6026
/* Module entry/exit points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6029
6030
6031