[BNX2]: Combine small mem allocations
[pandora-kernel.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #include "bnx2.h"
13 #include "bnx2_fw.h"
14
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.4.38"
#define DRV_MODULE_RELDATE      "February 10, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* One-line banner printed when the driver is loaded. */
static char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
/* Board index; carried in the driver_data field of bnx2_pci_tbl[] and
 * used to index board_info[] below, so the two must stay in sync.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
} board_t;
47
/* indexed by board_t, above */
static const struct {
        char *name;     /* human-readable board name printed at probe time */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        };
60
/* PCI IDs claimed by this driver.  HP OEM boards are matched first by
 * subsystem vendor/device so they get their NC370x names; the generic
 * PCI_ANY_ID entries catch everything else.  The last field is the
 * board_t index into board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { 0, }
};
78
/* Table of NVRAM devices the chip may be strapped for.  Each entry
 * carries the strapping value, NVRAM controller configuration words,
 * a buffered-flash flag, page geometry, byte-address mask, total size
 * and a printable name.
 * NOTE(review): exact field meanings come from struct flash_spec in
 * bnx2.h -- confirm there before reordering or editing entries.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
165
/* Export the PCI ID table so hotplug tooling can auto-load this module. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
167
168 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
169 {
170         u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
171
172         if (diff > MAX_TX_DESC_CNT)
173                 diff = (diff & MAX_TX_DESC_CNT) - 1;
174         return (bp->tx_ring_size - diff);
175 }
176
/* Read a chip register indirectly through the PCICFG register window.
 * The window-address write must precede the data read; callers must
 * provide any serialization needed against other window users.
 * NOTE(review): serialization requirement inferred from the two-step
 * window protocol -- confirm against the call sites.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
183
/* Write a chip register indirectly through the PCICFG register window.
 * Counterpart of bnx2_reg_rd_ind(); same window-sharing caveats apply.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
190
/* Write one 32-bit word into on-chip context memory.  cid_addr is the
 * base address of the context block and offset the word within it; the
 * address register must be written before the data register.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
        REG_WR(bp, BNX2_CTX_DATA, val);
}
198
/* Read PHY register @reg over the MDIO interface into *@val.
 *
 * If the EMAC is auto-polling the PHY, polling is suspended first so
 * this manual transaction does not collide with it, and restored before
 * returning.  Returns 0 on success; -EBUSY (with *val set to 0) if the
 * transaction does not complete within the polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back to flush the posted write before delaying. */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Launch the read: PHY address, register, READ command, BUSY. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion: up to 50 x 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to pick up the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
255
/* Write @val to PHY register @reg over the MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction and restored afterwards.  Returns 0 on success or -EBUSY
 * if the transaction does not complete within the polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                /* Read back to flush the posted write before delaying. */
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Launch the write: address, register, data, WRITE command, BUSY. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion: up to 50 x 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
304
/* Mask the chip interrupt.  The read back flushes the posted write so
 * the mask is guaranteed to have reached the device before returning.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
312
/* Unmask the chip interrupt and acknowledge up to last_status_idx.
 * The first write acks while still masked, the second unmasks; the
 * COAL_NOW command then forces a coalescing pass so any status-block
 * update that arrived while masked raises an interrupt immediately.
 * NOTE(review): ack-while-masked rationale inferred from the bit
 * combinations used -- confirm against chip documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
325
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is incremented before masking; presumably the irq handler
 * checks it so late interrupts are ignored -- confirm at the handler.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
333
/* Quiesce the interface: mask interrupts (synchronously), stop the
 * poll routine and the tx queue.  trans_start is refreshed so the
 * deliberately-stopped queue does not trip the tx watchdog.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                netif_poll_disable(bp->dev);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
344
/* Undo bnx2_netif_stop().  intr_sem counts nested stops; only the call
 * that drops it back to zero restarts the queue, polling and the
 * interrupt, so stop/start pairs balance correctly.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        netif_poll_enable(bp->dev);
                        bnx2_enable_int(bp);
                }
        }
}
356
357 static void
358 bnx2_free_mem(struct bnx2 *bp)
359 {
360         int i;
361
362         if (bp->status_blk) {
363                 pci_free_consistent(bp->pdev, bp->status_stats_size,
364                                     bp->status_blk, bp->status_blk_mapping);
365                 bp->status_blk = NULL;
366                 bp->stats_blk = NULL;
367         }
368         if (bp->tx_desc_ring) {
369                 pci_free_consistent(bp->pdev,
370                                     sizeof(struct tx_bd) * TX_DESC_CNT,
371                                     bp->tx_desc_ring, bp->tx_desc_mapping);
372                 bp->tx_desc_ring = NULL;
373         }
374         kfree(bp->tx_buf_ring);
375         bp->tx_buf_ring = NULL;
376         for (i = 0; i < bp->rx_max_ring; i++) {
377                 if (bp->rx_desc_ring[i])
378                         pci_free_consistent(bp->pdev,
379                                             sizeof(struct rx_bd) * RX_DESC_CNT,
380                                             bp->rx_desc_ring[i],
381                                             bp->rx_desc_mapping[i]);
382                 bp->rx_desc_ring[i] = NULL;
383         }
384         vfree(bp->rx_buf_ring);
385         bp->rx_buf_ring = NULL;
386 }
387
/* Allocate all per-device ring and block memory:
 *  - tx shadow ring (kzalloc) and tx BD ring (coherent DMA),
 *  - rx shadow ring (vmalloc, can be large) and one coherent DMA BD
 *    ring per rx ring page,
 *  - status and statistics blocks combined into a single coherent
 *    allocation, stats placed after the cache-aligned status block.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* vmalloc: the rx shadow ring spans all rx pages and may be too
         * large for kmalloc. */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* The stats block lives right after the aligned status block;
         * CPU pointer and DMA address are offset by the same amount. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
446
/* Encode the driver's current link state in BNX2_LINK_STATUS format and
 * post it to the firmware's shared memory so management firmware sees
 * the same speed/duplex/autoneg state as the host.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice on purpose: MII latches
                         * link-down, so only the second read reflects
                         * the current state. */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
502
503 static void
504 bnx2_report_link(struct bnx2 *bp)
505 {
506         if (bp->link_up) {
507                 netif_carrier_on(bp->dev);
508                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
509
510                 printk("%d Mbps ", bp->line_speed);
511
512                 if (bp->duplex == DUPLEX_FULL)
513                         printk("full duplex");
514                 else
515                         printk("half duplex");
516
517                 if (bp->flow_ctrl) {
518                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
519                                 printk(", receive ");
520                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
521                                         printk("& transmit ");
522                         }
523                         else {
524                                 printk(", transmit ");
525                         }
526                         printk("flow control ON");
527                 }
528                 printk("\n");
529         }
530         else {
531                 netif_carrier_off(bp->dev);
532                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
533         }
534
535         bnx2_report_fw_link(bp);
536 }
537
/* Work out the tx/rx pause configuration after a link change.
 *
 * If flow control is not fully autonegotiated, the requested setting is
 * applied directly (full duplex only).  The 5708 SerDes reports the
 * resolved result in hardware; otherwise the result is derived from the
 * local and partner advertisements per IEEE 802.3 Table 28B-3, with
 * 1000BASE-X pause bits first remapped to their copper equivalents.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != 
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Not fully autonegotiated: honor the requested setting. */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                u32 val;

                /* The 5708 SerDes reports the resolved pause state. */
                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Remap 1000BASE-X pause bits to the copper bit layout
                 * so the resolution below works for both PHY types. */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
613
614 static int
615 bnx2_5708s_linkup(struct bnx2 *bp)
616 {
617         u32 val;
618
619         bp->link_up = 1;
620         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
621         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
622                 case BCM5708S_1000X_STAT1_SPEED_10:
623                         bp->line_speed = SPEED_10;
624                         break;
625                 case BCM5708S_1000X_STAT1_SPEED_100:
626                         bp->line_speed = SPEED_100;
627                         break;
628                 case BCM5708S_1000X_STAT1_SPEED_1G:
629                         bp->line_speed = SPEED_1000;
630                         break;
631                 case BCM5708S_1000X_STAT1_SPEED_2G5:
632                         bp->line_speed = SPEED_2500;
633                         break;
634         }
635         if (val & BCM5708S_1000X_STAT1_FD)
636                 bp->duplex = DUPLEX_FULL;
637         else
638                 bp->duplex = DUPLEX_HALF;
639
640         return 0;
641 }
642
643 static int
644 bnx2_5706s_linkup(struct bnx2 *bp)
645 {
646         u32 bmcr, local_adv, remote_adv, common;
647
648         bp->link_up = 1;
649         bp->line_speed = SPEED_1000;
650
651         bnx2_read_phy(bp, MII_BMCR, &bmcr);
652         if (bmcr & BMCR_FULLDPLX) {
653                 bp->duplex = DUPLEX_FULL;
654         }
655         else {
656                 bp->duplex = DUPLEX_HALF;
657         }
658
659         if (!(bmcr & BMCR_ANENABLE)) {
660                 return 0;
661         }
662
663         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
664         bnx2_read_phy(bp, MII_LPA, &remote_adv);
665
666         common = local_adv & remote_adv;
667         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
668
669                 if (common & ADVERTISE_1000XFULL) {
670                         bp->duplex = DUPLEX_FULL;
671                 }
672                 else {
673                         bp->duplex = DUPLEX_HALF;
674                 }
675         }
676
677         return 0;
678 }
679
680 static int
681 bnx2_copper_linkup(struct bnx2 *bp)
682 {
683         u32 bmcr;
684
685         bnx2_read_phy(bp, MII_BMCR, &bmcr);
686         if (bmcr & BMCR_ANENABLE) {
687                 u32 local_adv, remote_adv, common;
688
689                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
690                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
691
692                 common = local_adv & (remote_adv >> 2);
693                 if (common & ADVERTISE_1000FULL) {
694                         bp->line_speed = SPEED_1000;
695                         bp->duplex = DUPLEX_FULL;
696                 }
697                 else if (common & ADVERTISE_1000HALF) {
698                         bp->line_speed = SPEED_1000;
699                         bp->duplex = DUPLEX_HALF;
700                 }
701                 else {
702                         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
703                         bnx2_read_phy(bp, MII_LPA, &remote_adv);
704
705                         common = local_adv & remote_adv;
706                         if (common & ADVERTISE_100FULL) {
707                                 bp->line_speed = SPEED_100;
708                                 bp->duplex = DUPLEX_FULL;
709                         }
710                         else if (common & ADVERTISE_100HALF) {
711                                 bp->line_speed = SPEED_100;
712                                 bp->duplex = DUPLEX_HALF;
713                         }
714                         else if (common & ADVERTISE_10FULL) {
715                                 bp->line_speed = SPEED_10;
716                                 bp->duplex = DUPLEX_FULL;
717                         }
718                         else if (common & ADVERTISE_10HALF) {
719                                 bp->line_speed = SPEED_10;
720                                 bp->duplex = DUPLEX_HALF;
721                         }
722                         else {
723                                 bp->line_speed = 0;
724                                 bp->link_up = 0;
725                         }
726                 }
727         }
728         else {
729                 if (bmcr & BMCR_SPEED100) {
730                         bp->line_speed = SPEED_100;
731                 }
732                 else {
733                         bp->line_speed = SPEED_10;
734                 }
735                 if (bmcr & BMCR_FULLDPLX) {
736                         bp->duplex = DUPLEX_FULL;
737                 }
738                 else {
739                         bp->duplex = DUPLEX_HALF;
740                 }
741         }
742
743         return 0;
744 }
745
/* Program the EMAC to match the resolved link state: inter-frame gap,
 * port mode (MII/GMII/2.5G), duplex and tx/rx pause enables, then ack
 * the link-change interrupt.  Returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* Default inter-frame gap; widened below for 1000/half. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* Only the 5708 has a dedicated 10M port
                                 * mode; the 5706 uses plain MII. */
                                if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
812
/* Re-evaluate the link after a link-change event: read the PHY, decode
 * the new speed/duplex/flow-control state, report any transition and
 * reprogram the MAC to match.  Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In MAC loopback the link is always considered up. */
        if (bp->loopback == MAC_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        link_up = bp->link_up;

        /* BMSR latches link-down; read twice for the current state. */
        bnx2_read_phy(bp, MII_BMSR, &bmsr);
        bnx2_read_phy(bp, MII_BMSR, &bmsr);

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                /* 5706 SerDes: trust the EMAC's link indication over
                 * the PHY's BMSR bit. */
                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                        (bp->autoneg & AUTONEG_SPEED)) {

                        u32 bmcr;

                        /* Link lost on SerDes: make sure autoneg is
                         * re-enabled so the link can renegotiate. */
                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        if (!(bmcr & BMCR_ANENABLE)) {
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANENABLE);
                        }
                }
                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        /* Only log/report when the state actually changed. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
878
879 static int
880 bnx2_reset_phy(struct bnx2 *bp)
881 {
882         int i;
883         u32 reg;
884
885         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
886
887 #define PHY_RESET_MAX_WAIT 100
888         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
889                 udelay(10);
890
891                 bnx2_read_phy(bp, MII_BMCR, &reg);
892                 if (!(reg & BMCR_RESET)) {
893                         udelay(20);
894                         break;
895                 }
896         }
897         if (i == PHY_RESET_MAX_WAIT) {
898                 return -EBUSY;
899         }
900         return 0;
901 }
902
903 static u32
904 bnx2_phy_get_pause_adv(struct bnx2 *bp)
905 {
906         u32 adv = 0;
907
908         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
909                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
910
911                 if (bp->phy_flags & PHY_SERDES_FLAG) {
912                         adv = ADVERTISE_1000XPAUSE;
913                 }
914                 else {
915                         adv = ADVERTISE_PAUSE_CAP;
916                 }
917         }
918         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
919                 if (bp->phy_flags & PHY_SERDES_FLAG) {
920                         adv = ADVERTISE_1000XPSE_ASYM;
921                 }
922                 else {
923                         adv = ADVERTISE_PAUSE_ASYM;
924                 }
925         }
926         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
927                 if (bp->phy_flags & PHY_SERDES_FLAG) {
928                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
929                 }
930                 else {
931                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
932                 }
933         }
934         return adv;
935 }
936
/* Program a SerDes PHY for the requested forced or autonegotiated
 * link settings.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: 1000 Mbps with the requested duplex,
		 * autoneg disabled.
		 */
		u32 new_bmcr;
		int force_link_down = 0;

		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Clear the 2.5G capability when forcing speed; if
			 * it was set, the link must be retrained below.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly restart autoneg with nothing
				 * advertised so the partner sees the link
				 * drop before the forced mode takes effect.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path: advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Rewrite and restart autoneg only if the advertisement changed
	 * or autoneg was disabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			/* Loopback isolates the PHY; hold it ~11 ms so
			 * the partner notices the link drop.
			 */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1035
1036 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1037         (ADVERTISED_1000baseT_Full)
1038
1039 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1040         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1041         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1042         ADVERTISED_1000baseT_Full)
1043
1044 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1045         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1046         
1047 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1048
/* Program a copper PHY for the requested forced or autonegotiated
 * link settings.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask the current advertisement registers down to the
		 * bits this driver manages so the change-detection
		 * comparison below is meaningful.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Build the new advertisement words from bp->advertising. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite the PHY and restart autoneg only if something
		 * actually changed, or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR link status is latched-low; read twice for the
		 * current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Poll (up to ~62 ms) until the link drops. */
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1143
1144 static int
1145 bnx2_setup_phy(struct bnx2 *bp)
1146 {
1147         if (bp->loopback == MAC_LOOPBACK)
1148                 return 0;
1149
1150         if (bp->phy_flags & PHY_SERDES_FLAG) {
1151                 return (bnx2_setup_serdes_phy(bp));
1152         }
1153         else {
1154                 return (bnx2_setup_copper_phy(bp));
1155         }
1156 }
1157
/* One-time init of the 5708 SerDes PHY: IEEE register mapping, fiber
 * autodetect, PLL early-link detect, optional 2.5G capability, and
 * board/revision-specific TX signal tweaks from shared memory.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Select the IEEE register set via the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a TX control value configured in shared memory, but
	 * only on backplane boards.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1211
/* One-time init of the 5706 SerDes PHY.  The 0x18/0x1c accesses appear
 * to be vendor-specific (shadow) registers with values supplied by
 * Broadcom -- NOTE(review): exact register semantics not documented
 * here; confirm against Broadcom PHY documentation before changing.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	/* Jumbo frames need the extended packet length bit set. */
	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1247
/* One-time init of a copper PHY: CRC workaround sequence, extended
 * packet length for jumbo frames, and ethernet@wirespeed.  Registers
 * 0x10/0x15/0x17/0x18 are vendor-specific (shadow/expansion) regs.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): the flag is set unconditionally just above, so
	 * this test is always true as written.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1290
1291
1292 static int
1293 bnx2_init_phy(struct bnx2 *bp)
1294 {
1295         u32 val;
1296         int rc = 0;
1297
1298         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1299         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1300
1301         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1302
1303         bnx2_reset_phy(bp);
1304
1305         bnx2_read_phy(bp, MII_PHYSID1, &val);
1306         bp->phy_id = val << 16;
1307         bnx2_read_phy(bp, MII_PHYSID2, &val);
1308         bp->phy_id |= val & 0xffff;
1309
1310         if (bp->phy_flags & PHY_SERDES_FLAG) {
1311                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1312                         rc = bnx2_init_5706s_phy(bp);
1313                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1314                         rc = bnx2_init_5708s_phy(bp);
1315         }
1316         else {
1317                 rc = bnx2_init_copper_phy(bp);
1318         }
1319
1320         bnx2_setup_phy(bp);
1321
1322         return rc;
1323 }
1324
1325 static int
1326 bnx2_set_mac_loopback(struct bnx2 *bp)
1327 {
1328         u32 mac_mode;
1329
1330         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1331         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1332         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1333         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1334         bp->link_up = 1;
1335         return 0;
1336 }
1337
1338 static int bnx2_test_link(struct bnx2 *);
1339
1340 static int
1341 bnx2_set_phy_loopback(struct bnx2 *bp)
1342 {
1343         u32 mac_mode;
1344         int rc, i;
1345
1346         spin_lock_bh(&bp->phy_lock);
1347         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1348                             BMCR_SPEED1000);
1349         spin_unlock_bh(&bp->phy_lock);
1350         if (rc)
1351                 return rc;
1352
1353         for (i = 0; i < 10; i++) {
1354                 if (bnx2_test_link(bp) == 0)
1355                         break;
1356                 udelay(10);
1357         }
1358
1359         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1360         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1361                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1362                       BNX2_EMAC_MODE_25G);
1363
1364         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1365         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1366         bp->link_up = 1;
1367         return 0;
1368 }
1369
/* Post a message to the bootcode through the shared-memory driver
 * mailbox and wait for the firmware's acknowledgement.
 *
 * Returns 0 on success (and unconditionally for WAIT0 messages),
 * -EBUSY if the firmware never acked within FW_ACK_TIME_OUT_MS,
 * -EIO if it acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the ack can
	 * be matched to this particular request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require the full handshake. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1412
1413 static void
1414 bnx2_init_context(struct bnx2 *bp)
1415 {
1416         u32 vcid;
1417
1418         vcid = 96;
1419         while (vcid) {
1420                 u32 vcid_addr, pcid_addr, offset;
1421
1422                 vcid--;
1423
1424                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1425                         u32 new_vcid;
1426
1427                         vcid_addr = GET_PCID_ADDR(vcid);
1428                         if (vcid & 0x8) {
1429                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1430                         }
1431                         else {
1432                                 new_vcid = vcid;
1433                         }
1434                         pcid_addr = GET_PCID_ADDR(new_vcid);
1435                 }
1436                 else {
1437                         vcid_addr = GET_CID_ADDR(vcid);
1438                         pcid_addr = vcid_addr;
1439                 }
1440
1441                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1442                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1443
1444                 /* Zero out the context. */
1445                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1446                         CTX_WR(bp, 0x00, offset, 0);
1447                 }
1448
1449                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1450                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1451         }
1452 }
1453
/* Hardware workaround: rx mbuf clusters whose allocated address has
 * bit 9 set are bad memory blocks.  Drain the chip's entire free
 * pool, remember the good clusters, and return only those to the
 * pool -- deliberately leaking the bad ones so the chip never hands
 * them out again.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Room for up to 512 good cluster ids. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): encoding expected by the FW_BUF_FREE
		 * register -- cluster id in both halves plus a valid bit,
		 * presumably; confirm against chip documentation.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1504
1505 static void
1506 bnx2_set_mac_addr(struct bnx2 *bp) 
1507 {
1508         u32 val;
1509         u8 *mac_addr = bp->dev->dev_addr;
1510
1511         val = (mac_addr[0] << 8) | mac_addr[1];
1512
1513         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1514
1515         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1516                 (mac_addr[4] << 8) | mac_addr[5];
1517
1518         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1519 }
1520
/* Allocate and DMA-map a receive skb, attaching it to rx ring slot
 * 'index'.  Returns 0 on success, -ENOMEM if skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data buffer to an 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* The descriptor carries the 64-bit bus address split in two. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Running byte count of posted rx buffers (producer byte
	 * sequence).
	 */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1553
1554 static void
1555 bnx2_phy_int(struct bnx2 *bp)
1556 {
1557         u32 new_link_state, old_link_state;
1558
1559         new_link_state = bp->status_blk->status_attn_bits &
1560                 STATUS_ATTN_BITS_LINK_STATE;
1561         old_link_state = bp->status_blk->status_attn_bits_ack &
1562                 STATUS_ATTN_BITS_LINK_STATE;
1563         if (new_link_state != old_link_state) {
1564                 if (new_link_state) {
1565                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1566                                 STATUS_ATTN_BITS_LINK_STATE);
1567                 }
1568                 else {
1569                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1570                                 STATUS_ATTN_BITS_LINK_STATE);
1571                 }
1572                 bnx2_set_link(bp);
1573         }
1574 }
1575
/* Service completed tx descriptors: unmap their DMA buffers, free the
 * skbs, advance the software consumer index, and wake the tx queue if
 * it was stopped and enough descriptors are free again.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	/* The last entry of each ring page is not a real descriptor
	 * (it chains to the next page), so skip over it when the hw
	 * index lands there.
	 */
	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO 
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if this packet's last BD hasn't completed
			 * yet (signed comparison handles index wrap).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		/* Unmap the linear part of the skb... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* ...then each page fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		/* Re-sample the hw index; more descriptors may have
		 * completed while we were working.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Wake the queue under tx_lock, re-checking the stopped state
	 * to avoid racing with the transmit path that stopped it.
	 */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
1658
/* Recycle an rx buffer without reallocating: move the skb (and, when
 * the slots differ, its DMA mapping and descriptor address) from ring
 * slot 'cons' to slot 'prod'.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the header region back to the device; the CPU side was
	 * synced for inspection by the rx path.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the bus address into the producer's descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1688
/* Service completed rx buffers, up to @budget packets.  Each completed
 * buffer starts with an l2_fhdr written by the chip; frames with error
 * status are recycled.  Good short frames (when mtu > 1500) are copied
 * into a fresh small skb so the large buffer can be reused; otherwise a
 * replacement buffer is allocated and the original skb is passed up.
 * Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The hardware index never lands on the last entry of a page
	 * (presumably the link bd) — skip over it.  TODO confirm against
	 * ring setup code.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area; the full buffer is unmapped
		 * later if the skb is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* pkt_len appears to include the 4-byte FCS — stripped here. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			/* Original big buffer goes back on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer posted; this skb can be
			 * unmapped and handed up whole.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle the
			 * buffer and count nothing.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag
		 * (0x8100), which accounts for the extra bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1839
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Ack and keep interrupts masked; bnx2_poll() re-enables them
	 * when NAPI work is done.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1862
/* INTx ISR.  May be called for a shared interrupt that is not ours;
 * returns IRQ_NONE in that case so other handlers run.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI polling finishes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1892
1893 static inline int
1894 bnx2_has_work(struct bnx2 *bp)
1895 {
1896         struct status_block *sblk = bp->status_blk;
1897
1898         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1899             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1900                 return 1;
1901
1902         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1903             bp->link_up)
1904                 return 1;
1905
1906         return 0;
1907 }
1908
/* NAPI poll routine.  Handles link attention, tx completions and up to
 * quota rx packets, then re-enables interrupts when no work remains.
 * Returns 1 to stay on the poll list, 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Unacknowledged link attention bit? */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single unmasking ack suffices. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack twice — first with the interrupt still
		 * masked, then unmasked.  NOTE(review): looks like a
		 * deliberate two-step sequence for INTx; confirm against
		 * hardware errata before simplifying.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
1970
/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; they are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a vlan group is
	 * registered and ASF is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low byte of the little-endian CRC selects one
			 * of 256 hash bits: bits 7:5 pick the register,
			 * bits 4:0 the bit within it.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user0 rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2044
2045 static void
2046 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2047         u32 rv2p_proc)
2048 {
2049         int i;
2050         u32 val;
2051
2052
2053         for (i = 0; i < rv2p_code_len; i += 8) {
2054                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2055                 rv2p_code++;
2056                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2057                 rv2p_code++;
2058
2059                 if (rv2p_proc == RV2P_PROC1) {
2060                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2061                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2062                 }
2063                 else {
2064                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2065                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2066                 }
2067         }
2068
2069         /* Reset the processor, un-stall is done later. */
2070         if (rv2p_proc == RV2P_PROC1) {
2071                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2072         }
2073         else {
2074                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2075         }
2076 }
2077
2078 static void
2079 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2080 {
2081         u32 offset;
2082         u32 val;
2083
2084         /* Halt the CPU. */
2085         val = REG_RD_IND(bp, cpu_reg->mode);
2086         val |= cpu_reg->mode_value_halt;
2087         REG_WR_IND(bp, cpu_reg->mode, val);
2088         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2089
2090         /* Load the Text area. */
2091         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2092         if (fw->text) {
2093                 int j;
2094
2095                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2096                         REG_WR_IND(bp, offset, fw->text[j]);
2097                 }
2098         }
2099
2100         /* Load the Data area. */
2101         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2102         if (fw->data) {
2103                 int j;
2104
2105                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2106                         REG_WR_IND(bp, offset, fw->data[j]);
2107                 }
2108         }
2109
2110         /* Load the SBSS area. */
2111         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2112         if (fw->sbss) {
2113                 int j;
2114
2115                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2116                         REG_WR_IND(bp, offset, fw->sbss[j]);
2117                 }
2118         }
2119
2120         /* Load the BSS area. */
2121         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2122         if (fw->bss) {
2123                 int j;
2124
2125                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2126                         REG_WR_IND(bp, offset, fw->bss[j]);
2127                 }
2128         }
2129
2130         /* Load the Read-Only area. */
2131         offset = cpu_reg->spad_base +
2132                 (fw->rodata_addr - cpu_reg->mips_view_base);
2133         if (fw->rodata) {
2134                 int j;
2135
2136                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2137                         REG_WR_IND(bp, offset, fw->rodata[j]);
2138                 }
2139         }
2140
2141         /* Clear the pre-fetch instruction. */
2142         REG_WR_IND(bp, cpu_reg->inst, 0);
2143         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2144
2145         /* Start the CPU. */
2146         val = REG_RD_IND(bp, cpu_reg->mode);
2147         val &= ~cpu_reg->mode_value_halt;
2148         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2149         REG_WR_IND(bp, cpu_reg->mode, val);
2150 }
2151
/* Load firmware into all four internal CPUs (RV2P, RXP, TXP, TPAT,
 * COM).  The same cpu_reg/fw_info locals are refilled for each CPU;
 * only the fields assigned below are consumed by load_cpu_fw(), so the
 * structs are deliberately not zeroed between uses.
 */
static void
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
	load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_RXP_b06FwText;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TXP_b06FwText;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_TPAT_b06FwText;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bnx2_COM_b06FwText;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

}
2347
/* Move the device between PCI power states D0 and D3hot.  For D3hot,
 * the MAC is optionally set up for Wake-on-LAN (10/100 copper autoneg,
 * broadcast + all-multicast receive) before power-down.  Returns 0 on
 * success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and the PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any pending magic/ACPI packet events and disable
		 * magic packet detection.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for WOL link,
			 * restoring the user settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Disable, program, re-enable the sort rule —
			 * same sequence as bnx2_set_rx_mode().
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 = D3hot in the PMCSR power-state field.  5706 A0/A1
		 * only enter D3hot when WOL is enabled.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2474
2475 static int
2476 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2477 {
2478         u32 val;
2479         int j;
2480
2481         /* Request access to the flash interface. */
2482         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2483         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2484                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2485                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2486                         break;
2487
2488                 udelay(5);
2489         }
2490
2491         if (j >= NVRAM_TIMEOUT_COUNT)
2492                 return -EBUSY;
2493
2494         return 0;
2495 }
2496
2497 static int
2498 bnx2_release_nvram_lock(struct bnx2 *bp)
2499 {
2500         int j;
2501         u32 val;
2502
2503         /* Relinquish nvram interface. */
2504         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2505
2506         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2507                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2508                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2509                         break;
2510
2511                 udelay(5);
2512         }
2513
2514         if (j >= NVRAM_TIMEOUT_COUNT)
2515                 return -EBUSY;
2516
2517         return 0;
2518 }
2519
2520
2521 static int
2522 bnx2_enable_nvram_write(struct bnx2 *bp)
2523 {
2524         u32 val;
2525
2526         val = REG_RD(bp, BNX2_MISC_CFG);
2527         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2528
2529         if (!bp->flash_info->buffered) {
2530                 int j;
2531
2532                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2533                 REG_WR(bp, BNX2_NVM_COMMAND,
2534                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2535
2536                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2537                         udelay(5);
2538
2539                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2540                         if (val & BNX2_NVM_COMMAND_DONE)
2541                                 break;
2542                 }
2543
2544                 if (j >= NVRAM_TIMEOUT_COUNT)
2545                         return -EBUSY;
2546         }
2547         return 0;
2548 }
2549
2550 static void
2551 bnx2_disable_nvram_write(struct bnx2 *bp)
2552 {
2553         u32 val;
2554
2555         val = REG_RD(bp, BNX2_MISC_CFG);
2556         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2557 }
2558
2559
2560 static void
2561 bnx2_enable_nvram_access(struct bnx2 *bp)
2562 {
2563         u32 val;
2564
2565         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2566         /* Enable both bits, even on read. */
2567         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2568                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2569 }
2570
2571 static void
2572 bnx2_disable_nvram_access(struct bnx2 *bp)
2573 {
2574         u32 val;
2575
2576         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2577         /* Disable both bits, even after read. */
2578         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2579                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2580                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2581 }
2582
2583 static int
2584 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2585 {
2586         u32 cmd;
2587         int j;
2588
2589         if (bp->flash_info->buffered)
2590                 /* Buffered flash, no erase needed */
2591                 return 0;
2592
2593         /* Build an erase command */
2594         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2595               BNX2_NVM_COMMAND_DOIT;
2596
2597         /* Need to clear DONE bit separately. */
2598         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2599
2600         /* Address of the NVRAM to read from. */
2601         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2602
2603         /* Issue an erase command. */
2604         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2605
2606         /* Wait for completion. */
2607         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2608                 u32 val;
2609
2610                 udelay(5);
2611
2612                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2613                 if (val & BNX2_NVM_COMMAND_DONE)
2614                         break;
2615         }
2616
2617         if (j >= NVRAM_TIMEOUT_COUNT)
2618                 return -EBUSY;
2619
2620         return 0;
2621 }
2622
2623 static int
2624 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2625 {
2626         u32 cmd;
2627         int j;
2628
2629         /* Build the command word. */
2630         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2631
2632         /* Calculate an offset of a buffered flash. */
2633         if (bp->flash_info->buffered) {
2634                 offset = ((offset / bp->flash_info->page_size) <<
2635                            bp->flash_info->page_bits) +
2636                           (offset % bp->flash_info->page_size);
2637         }
2638
2639         /* Need to clear DONE bit separately. */
2640         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2641
2642         /* Address of the NVRAM to read from. */
2643         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2644
2645         /* Issue a read command. */
2646         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2647
2648         /* Wait for completion. */
2649         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2650                 u32 val;
2651
2652                 udelay(5);
2653
2654                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2655                 if (val & BNX2_NVM_COMMAND_DONE) {
2656                         val = REG_RD(bp, BNX2_NVM_READ);
2657
2658                         val = be32_to_cpu(val);
2659                         memcpy(ret_val, &val, 4);
2660                         break;
2661                 }
2662         }
2663         if (j >= NVRAM_TIMEOUT_COUNT)
2664                 return -EBUSY;
2665
2666         return 0;
2667 }
2668
2669
2670 static int
2671 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2672 {
2673         u32 cmd, val32;
2674         int j;
2675
2676         /* Build the command word. */
2677         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2678
2679         /* Calculate an offset of a buffered flash. */
2680         if (bp->flash_info->buffered) {
2681                 offset = ((offset / bp->flash_info->page_size) <<
2682                           bp->flash_info->page_bits) +
2683                          (offset % bp->flash_info->page_size);
2684         }
2685
2686         /* Need to clear DONE bit separately. */
2687         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2688
2689         memcpy(&val32, val, 4);
2690         val32 = cpu_to_be32(val32);
2691
2692         /* Write the data. */
2693         REG_WR(bp, BNX2_NVM_WRITE, val32);
2694
2695         /* Address of the NVRAM to write to. */
2696         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2697
2698         /* Issue the write command. */
2699         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2700
2701         /* Wait for completion. */
2702         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2703                 udelay(5);
2704
2705                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2706                         break;
2707         }
2708         if (j >= NVRAM_TIMEOUT_COUNT)
2709                 return -EBUSY;
2710
2711         return 0;
2712 }
2713
/* Identify the flash/EEPROM device behind the NVRAM interface and
 * record its parameters in bp->flash_info and bp->flash_size.  If the
 * interface has not yet been reconfigured, the matching flash_table
 * entry's configuration registers are programmed into the chip.
 * Returns 0 on success, -ENODEV if no flash_table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* NOTE(review): bit 30 of NVM_CFG1 appears to flag an
	 * already-reconfigured interface and bit 23 a backup strap —
	 * confirm against the chip documentation. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the strap bits stored in config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop broke out early: no table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared memory; fall back
	 * to the total size from the matched flash table entry. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2791
2792 static int
2793 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2794                 int buf_size)
2795 {
2796         int rc = 0;
2797         u32 cmd_flags, offset32, len32, extra;
2798
2799         if (buf_size == 0)
2800                 return 0;
2801
2802         /* Request access to the flash interface. */
2803         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2804                 return rc;
2805
2806         /* Enable access to flash interface */
2807         bnx2_enable_nvram_access(bp);
2808
2809         len32 = buf_size;
2810         offset32 = offset;
2811         extra = 0;
2812
2813         cmd_flags = 0;
2814
2815         if (offset32 & 3) {
2816                 u8 buf[4];
2817                 u32 pre_len;
2818
2819                 offset32 &= ~3;
2820                 pre_len = 4 - (offset & 3);
2821
2822                 if (pre_len >= len32) {
2823                         pre_len = len32;
2824                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2825                                     BNX2_NVM_COMMAND_LAST;
2826                 }
2827                 else {
2828                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2829                 }
2830
2831                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2832
2833                 if (rc)
2834                         return rc;
2835
2836                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2837
2838                 offset32 += 4;
2839                 ret_buf += pre_len;
2840                 len32 -= pre_len;
2841         }
2842         if (len32 & 3) {
2843                 extra = 4 - (len32 & 3);
2844                 len32 = (len32 + 4) & ~3;
2845         }
2846
2847         if (len32 == 4) {
2848                 u8 buf[4];
2849
2850                 if (cmd_flags)
2851                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2852                 else
2853                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2854                                     BNX2_NVM_COMMAND_LAST;
2855
2856                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2857
2858                 memcpy(ret_buf, buf, 4 - extra);
2859         }
2860         else if (len32 > 0) {
2861                 u8 buf[4];
2862
2863                 /* Read the first word. */
2864                 if (cmd_flags)
2865                         cmd_flags = 0;
2866                 else
2867                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2868
2869                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2870
2871                 /* Advance to the next dword. */
2872                 offset32 += 4;
2873                 ret_buf += 4;
2874                 len32 -= 4;
2875
2876                 while (len32 > 4 && rc == 0) {
2877                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2878
2879                         /* Advance to the next dword. */
2880                         offset32 += 4;
2881                         ret_buf += 4;
2882                         len32 -= 4;
2883                 }
2884
2885                 if (rc)
2886                         return rc;
2887
2888                 cmd_flags = BNX2_NVM_COMMAND_LAST;
2889                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2890
2891                 memcpy(ret_buf, buf, 4 - extra);
2892         }
2893
2894         /* Disable access to flash interface */
2895         bnx2_disable_nvram_access(bp);
2896
2897         bnx2_release_nvram_lock(bp);
2898
2899         return rc;
2900 }
2901
2902 static int
2903 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2904                 int buf_size)
2905 {
2906         u32 written, offset32, len32;
2907         u8 *buf, start[4], end[4];
2908         int rc = 0;
2909         int align_start, align_end;
2910
2911         buf = data_buf;
2912         offset32 = offset;
2913         len32 = buf_size;
2914         align_start = align_end = 0;
2915
2916         if ((align_start = (offset32 & 3))) {
2917                 offset32 &= ~3;
2918                 len32 += align_start;
2919                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2920                         return rc;
2921         }
2922
2923         if (len32 & 3) {
2924                 if ((len32 > 4) || !align_start) {
2925                         align_end = 4 - (len32 & 3);
2926                         len32 += align_end;
2927                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2928                                 end, 4))) {
2929                                 return rc;
2930                         }
2931                 }
2932         }
2933
2934         if (align_start || align_end) {
2935                 buf = kmalloc(len32, GFP_KERNEL);
2936                 if (buf == 0)
2937                         return -ENOMEM;
2938                 if (align_start) {
2939                         memcpy(buf, start, 4);
2940                 }
2941                 if (align_end) {
2942                         memcpy(buf + len32 - 4, end, 4);
2943                 }
2944                 memcpy(buf + align_start, data_buf, buf_size);
2945         }
2946
2947         written = 0;
2948         while ((written < len32) && (rc == 0)) {
2949                 u32 page_start, page_end, data_start, data_end;
2950                 u32 addr, cmd_flags;
2951                 int i;
2952                 u8 flash_buffer[264];
2953
2954                 /* Find the page_start addr */
2955                 page_start = offset32 + written;
2956                 page_start -= (page_start % bp->flash_info->page_size);
2957                 /* Find the page_end addr */
2958                 page_end = page_start + bp->flash_info->page_size;
2959                 /* Find the data_start addr */
2960                 data_start = (written == 0) ? offset32 : page_start;
2961                 /* Find the data_end addr */
2962                 data_end = (page_end > offset32 + len32) ? 
2963                         (offset32 + len32) : page_end;
2964
2965                 /* Request access to the flash interface. */
2966                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2967                         goto nvram_write_end;
2968
2969                 /* Enable access to flash interface */
2970                 bnx2_enable_nvram_access(bp);
2971
2972                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2973                 if (bp->flash_info->buffered == 0) {
2974                         int j;
2975
2976                         /* Read the whole page into the buffer
2977                          * (non-buffer flash only) */
2978                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
2979                                 if (j == (bp->flash_info->page_size - 4)) {
2980                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
2981                                 }
2982                                 rc = bnx2_nvram_read_dword(bp,
2983                                         page_start + j, 
2984                                         &flash_buffer[j], 
2985                                         cmd_flags);
2986
2987                                 if (rc)
2988                                         goto nvram_write_end;
2989
2990                                 cmd_flags = 0;
2991                         }
2992                 }
2993
2994                 /* Enable writes to flash interface (unlock write-protect) */
2995                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2996                         goto nvram_write_end;
2997
2998                 /* Erase the page */
2999                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3000                         goto nvram_write_end;
3001
3002                 /* Re-enable the write again for the actual write */
3003                 bnx2_enable_nvram_write(bp);
3004
3005                 /* Loop to write back the buffer data from page_start to
3006                  * data_start */
3007                 i = 0;
3008                 if (bp->flash_info->buffered == 0) {
3009                         for (addr = page_start; addr < data_start;
3010                                 addr += 4, i += 4) {
3011                                 
3012                                 rc = bnx2_nvram_write_dword(bp, addr,
3013                                         &flash_buffer[i], cmd_flags);
3014
3015                                 if (rc != 0)
3016                                         goto nvram_write_end;
3017
3018                                 cmd_flags = 0;
3019                         }
3020                 }
3021
3022                 /* Loop to write the new data from data_start to data_end */
3023                 for (addr = data_start; addr < data_end; addr += 4, i++) {
3024                         if ((addr == page_end - 4) ||
3025                                 ((bp->flash_info->buffered) &&
3026                                  (addr == data_end - 4))) {
3027
3028                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3029                         }
3030                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3031                                 cmd_flags);
3032
3033                         if (rc != 0)
3034                                 goto nvram_write_end;
3035
3036                         cmd_flags = 0;
3037                         buf += 4;
3038                 }
3039
3040                 /* Loop to write back the buffer data from data_end
3041                  * to page_end */
3042                 if (bp->flash_info->buffered == 0) {
3043                         for (addr = data_end; addr < page_end;
3044                                 addr += 4, i += 4) {
3045                         
3046                                 if (addr == page_end-4) {
3047                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3048                                 }
3049                                 rc = bnx2_nvram_write_dword(bp, addr,
3050                                         &flash_buffer[i], cmd_flags);
3051
3052                                 if (rc != 0)
3053                                         goto nvram_write_end;
3054
3055                                 cmd_flags = 0;
3056                         }
3057                 }
3058
3059                 /* Disable writes to flash interface (lock write-protect) */
3060                 bnx2_disable_nvram_write(bp);
3061
3062                 /* Disable access to flash interface */
3063                 bnx2_disable_nvram_access(bp);
3064                 bnx2_release_nvram_lock(bp);
3065
3066                 /* Increment written */
3067                 written += data_end - data_start;
3068         }
3069
3070 nvram_write_end:
3071         if (align_start || align_end)
3072                 kfree(buf);
3073         return rc;
3074 }
3075
/* Soft-reset the chip core via BNX2_PCICFG_MISC_CONFIG after
 * handshaking with the bootcode firmware.  reset_code is passed to the
 * firmware in the DRV_MSG so it knows why the driver is resetting.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* NOTE(review): early 5706 steppings apparently need extra
	 * settling time after the reset is requested — confirm against
	 * chip errata. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Both the request and the busy bits must clear before the chip
	 * is usable again. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3154
/* Bring the chip from its post-reset state to operational: program
 * DMA swapping/channels, context memory, the internal CPUs, NVRAM,
 * MAC address, MTU, status/statistics block DMA addresses and host
 * coalescing parameters, then signal the firmware and enable the
 * internal blocks.  Returns the final bnx2_fw_sync() result.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping and read/write channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented magic bits in DMA_CONFIG — confirm
	 * their meaning against the chip manual. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0: force single TDMA (presumably an A0 workaround). */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear relaxed ordering for this device. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context init can run. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Set the kernel bypass block size to 256 bytes. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell RV2P the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing trip counts and timers (each register packs
	 * the during-interrupt value in the high 16 bits). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* NOTE(review): 5706 A1 does not get the timer mode bits —
	 * presumably a stepping-specific limitation; confirm. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Remember whether the firmware has ASF enabled on this port. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Tell the firmware the driver has finished initializing. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining internal blocks and flush the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3314
3315
3316 static void
3317 bnx2_init_tx_ring(struct bnx2 *bp)
3318 {
3319         struct tx_bd *txbd;
3320         u32 val;
3321
3322         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3323                 
3324         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3325         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3326
3327         bp->tx_prod = 0;
3328         bp->tx_cons = 0;
3329         bp->hw_tx_cons = 0;
3330         bp->tx_prod_bseq = 0;
3331         
3332         val = BNX2_L2CTX_TYPE_TYPE_L2;
3333         val |= BNX2_L2CTX_TYPE_SIZE_L2;
3334         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3335
3336         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3337         val |= 8 << 16;
3338         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3339
3340         val = (u64) bp->tx_desc_mapping >> 32;
3341         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3342
3343         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3344         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3345 }
3346
/* Initialize the RX buffer descriptor ring(s): set buffer sizes, chain
 * the per-page BD rings into a circle, program the RX context with the
 * ring base address, pre-fill the ring with receive skbs, and publish
 * the initial producer index to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod; 
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	/* Reset the driver's view of the ring. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill every BD page: each BD carries the buffer length and
	 * START|END flags; the last BD of each page chains to the next
	 * page's ring (wrapping to page 0 after the last page). */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* RX context: ring type/size.  NOTE(review): the 0x02 << 8
	 * field is undocumented here — confirm against the chip manual. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* RX context: physical base address of the first BD page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with skbs; stop early on allocation failure. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3406
3407 static void
3408 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3409 {
3410         u32 num_rings, max;
3411
3412         bp->rx_ring_size = size;
3413         num_rings = 1;
3414         while (size > MAX_RX_DESC_CNT) {
3415                 size -= MAX_RX_DESC_CNT;
3416                 num_rings++;
3417         }
3418         /* round to next power of 2 */
3419         max = MAX_RX_RINGS;
3420         while ((max & num_rings) == 0)
3421                 max >>= 1;
3422
3423         if (num_rings != max)
3424                 max <<= 1;
3425
3426         bp->rx_max_ring = max;
3427         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3428 }
3429
/* Unmap and free every skb still held on the TX ring (used on reset /
 * close; normal completions are reclaimed in the interrupt path).
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	/* No i++ in the for statement: each skb occupies 1 + nr_frags
	 * consecutive BDs, so i advances by that amount below.
	 */
	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* First BD maps the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Following BDs map the paged fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_any(skb);
		/* Skip past the head BD plus all fragment BDs. */
		i += j + 1;
	}

}
3466
3467 static void
3468 bnx2_free_rx_skbs(struct bnx2 *bp)
3469 {
3470         int i;
3471
3472         if (bp->rx_buf_ring == NULL)
3473                 return;
3474
3475         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3476                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3477                 struct sk_buff *skb = rx_buf->skb;
3478
3479                 if (skb == NULL)
3480                         continue;
3481
3482                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3483                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3484
3485                 rx_buf->skb = NULL;
3486
3487                 dev_kfree_skb_any(skb);
3488         }
3489 }
3490
/* Release every skb still held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3497
3498 static int
3499 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3500 {
3501         int rc;
3502
3503         rc = bnx2_reset_chip(bp, reset_code);
3504         bnx2_free_skbs(bp);
3505         if (rc)
3506                 return rc;
3507
3508         bnx2_init_chip(bp);
3509         bnx2_init_tx_ring(bp);
3510         bnx2_init_rx_ring(bp);
3511         return 0;
3512 }
3513
3514 static int
3515 bnx2_init_nic(struct bnx2 *bp)
3516 {
3517         int rc;
3518
3519         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3520                 return rc;
3521
3522         bnx2_init_phy(bp);
3523         bnx2_set_link(bp);
3524         return 0;
3525 }
3526
/* Register self-test: for each entry in reg_tbl, verify that the
 * read/write bits (rw_mask) can be written and read back, and that the
 * read-only bits (ro_mask) are unaffected by writes of 0 and all-ones.
 * The original register value is restored in every path.
 *
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	/* Table terminated by offset 0xffff; the "flags" field is
	 * currently unused (always 0).
	 */
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits must keep
		 * their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back as all set,
		 * ro bits must still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3689
3690 static int
3691 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3692 {
3693         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3694                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3695         int i;
3696
3697         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3698                 u32 offset;
3699
3700                 for (offset = 0; offset < size; offset += 4) {
3701
3702                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3703
3704                         if (REG_RD_IND(bp, start + offset) !=
3705                                 test_pattern[i]) {
3706                                 return -ENODEV;
3707                         }
3708                 }
3709         }
3710         return 0;
3711 }
3712
3713 static int
3714 bnx2_test_memory(struct bnx2 *bp)
3715 {
3716         int ret = 0;
3717         int i;
3718         static const struct {
3719                 u32   offset;
3720                 u32   len;
3721         } mem_tbl[] = {
3722                 { 0x60000,  0x4000 },
3723                 { 0xa0000,  0x3000 },
3724                 { 0xe0000,  0x4000 },
3725                 { 0x120000, 0x4000 },
3726                 { 0x1a0000, 0x4000 },
3727                 { 0x160000, 0x4000 },
3728                 { 0xffffffff, 0    },
3729         };
3730
3731         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3732                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3733                         mem_tbl[i].len)) != 0) {
3734                         return ret;
3735                 }
3736         }
3737         
3738         return ret;
3739 }
3740
3741 #define BNX2_MAC_LOOPBACK       0
3742 #define BNX2_PHY_LOOPBACK       1
3743
/* Loopback self-test: transmit one 1514-byte frame addressed to our
 * own MAC through the selected loopback path (MAC-internal or
 * PHY-internal) and verify it arrives intact on the RX ring.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, and -ENODEV on any TX/RX mismatch.
 * Status block indices are polled directly (no interrupts needed).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = 0;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: dst = our MAC, zeroed src/type bytes,
	 * then an incrementing byte pattern as payload.
	 */
	pkt_size = 1514;
	skb = dev_alloc_skb(pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status-block update to snapshot the RX index. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the frame on a single TX BD and ring the doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force
	 * another status-block update to pick up the completions.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);

	/* The frame must be fully transmitted ... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ... and exactly num_pkts frames received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	/* NOTE(review): rx_start_idx is used directly as a buffer-ring
	 * index without RX_RING_IDX() masking -- confirm it cannot
	 * exceed the ring bounds with multi-page RX rings.
	 */
	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr sits at the start of the buffer, before the
	 * rx_offset that skb_reserve skips over.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if any RX error flag is set. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check excludes the 4-byte CRC appended by the MAC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
3862
3863 #define BNX2_MAC_LOOPBACK_FAILED        1
3864 #define BNX2_PHY_LOOPBACK_FAILED        2
3865 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
3866                                          BNX2_PHY_LOOPBACK_FAILED)
3867
3868 static int
3869 bnx2_test_loopback(struct bnx2 *bp)
3870 {
3871         int rc = 0;
3872
3873         if (!netif_running(bp->dev))
3874                 return BNX2_LOOPBACK_FAILED;
3875
3876         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3877         spin_lock_bh(&bp->phy_lock);
3878         bnx2_init_phy(bp);
3879         spin_unlock_bh(&bp->phy_lock);
3880         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3881                 rc |= BNX2_MAC_LOOPBACK_FAILED;
3882         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3883                 rc |= BNX2_PHY_LOOPBACK_FAILED;
3884         return rc;
3885 }
3886
3887 #define NVRAM_SIZE 0x200
3888 #define CRC32_RESIDUAL 0xdebb20e3
3889
3890 static int
3891 bnx2_test_nvram(struct bnx2 *bp)
3892 {
3893         u32 buf[NVRAM_SIZE / 4];
3894         u8 *data = (u8 *) buf;
3895         int rc = 0;
3896         u32 magic, csum;
3897
3898         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3899                 goto test_nvram_done;
3900
3901         magic = be32_to_cpu(buf[0]);
3902         if (magic != 0x669955aa) {
3903                 rc = -ENODEV;
3904                 goto test_nvram_done;
3905         }
3906
3907         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3908                 goto test_nvram_done;
3909
3910         csum = ether_crc_le(0x100, data);
3911         if (csum != CRC32_RESIDUAL) {
3912                 rc = -ENODEV;
3913                 goto test_nvram_done;
3914         }
3915
3916         csum = ether_crc_le(0x100, data + 0x100);
3917         if (csum != CRC32_RESIDUAL) {
3918                 rc = -ENODEV;
3919         }
3920
3921 test_nvram_done:
3922         return rc;
3923 }
3924
3925 static int
3926 bnx2_test_link(struct bnx2 *bp)
3927 {
3928         u32 bmsr;
3929
3930         spin_lock_bh(&bp->phy_lock);
3931         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3932         bnx2_read_phy(bp, MII_BMSR, &bmsr);
3933         spin_unlock_bh(&bp->phy_lock);
3934                 
3935         if (bmsr & BMSR_LSTATUS) {
3936                 return 0;
3937         }
3938         return -ENODEV;
3939 }
3940
3941 static int
3942 bnx2_test_intr(struct bnx2 *bp)
3943 {
3944         int i;
3945         u16 status_idx;
3946
3947         if (!netif_running(bp->dev))
3948                 return -ENODEV;
3949
3950         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3951
3952         /* This register is not touched during run-time. */
3953         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
3954         REG_RD(bp, BNX2_HC_COMMAND);
3955
3956         for (i = 0; i < 10; i++) {
3957                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3958                         status_idx) {
3959
3960                         break;
3961                 }
3962
3963                 msleep_interruptible(10);
3964         }
3965         if (i < 10)
3966                 return 0;
3967
3968         return -ENODEV;
3969 }
3970
/* Periodic driver timer (rearmed every bp->current_interval): sends
 * the driver-alive pulse to the firmware and, on 5706 SerDes parts,
 * runs a parallel-detect workaround for link partners that do not
 * autonegotiate.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* While interrupts are blocked via intr_sem (e.g. during a
	 * reset), skip the work but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat so the firmware knows the driver is still alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* Autoneg recently (re)started; give it time. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* NOTE(review): 0x1c/0x17/0x15 are
				 * vendor-specific PHY shadow registers;
				 * bit meanings below follow the inline
				 * comments -- confirm against the PHY
				 * datasheet.
				 */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Signal present but the
					 * partner is not sending config
					 * words: force 1000/full
					 * (parallel detect).
					 */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			(bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Link came up via parallel detect; if the
			 * partner now sends config words, go back to
			 * autonegotiation.
			 */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4049
/* Called with rtnl_lock */
/* net_device open handler: allocate ring/status memory, request the
 * interrupt (preferring MSI where supported), initialize the NIC, and
 * start the TX queue.  On the MSI path an interrupt self-test is run
 * and the driver falls back to INTx if it fails.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Try MSI except on 5706 A0/A1 (skipped there -- presumably a
	 * chip erratum) or when disabled by module parameter; fall back
	 * to shared INTx otherwise.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind irq/MSI setup and memory on init failure. */
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-initialize the NIC before requesting the
			 * INTx irq.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4145
/* Work-queue handler (scheduled from bnx2_tx_timeout): quiesce the
 * netif, fully re-initialize the NIC, and restart.  Runs in process
 * context.
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	/* Flag that a full reset is in progress for other code paths. */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set to 1 before restarting --
	 * presumably so the ISR treats interrupts as disabled until
	 * they are re-enabled; confirm against bnx2_netif_start().
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4163
4164 static void
4165 bnx2_tx_timeout(struct net_device *dev)
4166 {
4167         struct bnx2 *bp = netdev_priv(dev);
4168
4169         /* This allows the netif to be shutdown gracefully before resetting */
4170         schedule_work(&bp->reset_task);
4171 }
4172
4173 #ifdef BCM_VLAN
4174 /* Called with rtnl_lock */
4175 static void
4176 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4177 {
4178         struct bnx2 *bp = netdev_priv(dev);
4179
4180         bnx2_netif_stop(bp);
4181
4182         bp->vlgrp = vlgrp;
4183         bnx2_set_rx_mode(dev);
4184
4185         bnx2_netif_start(bp);
4186 }
4187
4188 /* Called with rtnl_lock */
4189 static void
4190 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4191 {
4192         struct bnx2 *bp = netdev_priv(dev);
4193
4194         bnx2_netif_stop(bp);
4195
4196         if (bp->vlgrp)
4197                 bp->vlgrp->vlan_devices[vid] = NULL;
4198         bnx2_set_rx_mode(dev);
4199
4200         bnx2_netif_start(bp);
4201 }
4202 #endif
4203
4204 /* Called with dev->xmit_lock.
4205  * hard_start_xmit is pseudo-lockless - a lock is only required when
4206  * the tx queue is full. This way, we get the benefit of lockless
4207  * operations most of the time without the complexities to handle
4208  * netif_stop_queue/wake_queue race conditions.
4209  */
4210 static int
4211 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4212 {
4213         struct bnx2 *bp = netdev_priv(dev);
4214         dma_addr_t mapping;
4215         struct tx_bd *txbd;
4216         struct sw_bd *tx_buf;
4217         u32 len, vlan_tag_flags, last_frag, mss;
4218         u16 prod, ring_prod;
4219         int i;
4220
4221         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4222                 netif_stop_queue(dev);
4223                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4224                         dev->name);
4225
4226                 return NETDEV_TX_BUSY;
4227         }
4228         len = skb_headlen(skb);
4229         prod = bp->tx_prod;
4230         ring_prod = TX_RING_IDX(prod);
4231
4232         vlan_tag_flags = 0;
4233         if (skb->ip_summed == CHECKSUM_HW) {
4234                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4235         }
4236
4237         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4238                 vlan_tag_flags |=
4239                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4240         }
4241 #ifdef BCM_TSO 
4242         if ((mss = skb_shinfo(skb)->tso_size) &&
4243                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4244                 u32 tcp_opt_len, ip_tcp_len;
4245
4246                 if (skb_header_cloned(skb) &&
4247                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4248                         dev_kfree_skb(skb);
4249                         return NETDEV_TX_OK;
4250                 }
4251
4252                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4253                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4254
4255                 tcp_opt_len = 0;
4256                 if (skb->h.th->doff > 5) {
4257                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4258                 }
4259                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4260
4261                 skb->nh.iph->check = 0;
4262                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4263                 skb->h.th->check =
4264                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4265                                             skb->nh.iph->daddr,
4266                                             0, IPPROTO_TCP, 0);
4267
4268                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4269                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4270                                 (tcp_opt_len >> 2)) << 8;
4271                 }
4272         }
4273         else
4274 #endif
4275         {
4276                 mss = 0;
4277         }
4278
4279         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4280         
4281         tx_buf = &bp->tx_buf_ring[ring_prod];
4282         tx_buf->skb = skb;
4283         pci_unmap_addr_set(tx_buf, mapping, mapping);
4284
4285         txbd = &bp->tx_desc_ring[ring_prod];
4286
4287         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4288         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4289         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4290         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4291
4292         last_frag = skb_shinfo(skb)->nr_frags;
4293
4294         for (i = 0; i < last_frag; i++) {
4295                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4296
4297                 prod = NEXT_TX_BD(prod);
4298                 ring_prod = TX_RING_IDX(prod);
4299                 txbd = &bp->tx_desc_ring[ring_prod];
4300
4301                 len = frag->size;
4302                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4303                         len, PCI_DMA_TODEVICE);
4304                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4305                                 mapping, mapping);
4306
4307                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4308                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4309                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4310                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4311
4312         }
4313         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4314
4315         prod = NEXT_TX_BD(prod);
4316         bp->tx_prod_bseq += skb->len;
4317
4318         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4319         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4320
4321         mmiowb();
4322
4323         bp->tx_prod = prod;
4324         dev->trans_start = jiffies;
4325
4326         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4327                 spin_lock(&bp->tx_lock);
4328                 netif_stop_queue(dev);
4329                 
4330                 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4331                         netif_wake_queue(dev);
4332                 spin_unlock(&bp->tx_lock);
4333         }
4334
4335         return NETDEV_TX_OK;
4336 }
4337
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching the wake-on-LAN
	 * configuration so the bootcode leaves the chip in the right
	 * power state.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	/* Release rings and DMA memory only after the chip is reset. */
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4373
/* Read a 64-bit hardware counter composed of _hi/_lo 32-bit halves.
 * On 64-bit hosts the full value fits in unsigned long; 32-bit hosts
 * only report the low half.  The whole expansion is parenthesized so
 * the macro groups correctly inside larger expressions.
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4386
/* ethtool/netdev get_stats: translate the chip's DMA'd statistics
 * block into struct net_device_stats.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No statistics block allocated yet: return the cached
	 * (possibly stale/zero) counters.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* GET_NET_STATS combines _hi/_lo counter halves; on 32-bit
	 * hosts only the low 32 bits are reported.
	 */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast = 
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions = 
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors = 
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors = 
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors = 
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors = 
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are skipped on 5706 and 5708 A0 because
	 * of errata (see the stats length tables' comment); report 0.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long) 
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	return net_stats;
}
4458
4459 /* All ethtool functions called with rtnl_lock */
4460
4461 static int
4462 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4463 {
4464         struct bnx2 *bp = netdev_priv(dev);
4465
4466         cmd->supported = SUPPORTED_Autoneg;
4467         if (bp->phy_flags & PHY_SERDES_FLAG) {
4468                 cmd->supported |= SUPPORTED_1000baseT_Full |
4469                         SUPPORTED_FIBRE;
4470
4471                 cmd->port = PORT_FIBRE;
4472         }
4473         else {
4474                 cmd->supported |= SUPPORTED_10baseT_Half |
4475                         SUPPORTED_10baseT_Full |
4476                         SUPPORTED_100baseT_Half |
4477                         SUPPORTED_100baseT_Full |
4478                         SUPPORTED_1000baseT_Full |
4479                         SUPPORTED_TP;
4480
4481                 cmd->port = PORT_TP;
4482         }
4483
4484         cmd->advertising = bp->advertising;
4485
4486         if (bp->autoneg & AUTONEG_SPEED) {
4487                 cmd->autoneg = AUTONEG_ENABLE;
4488         }
4489         else {
4490                 cmd->autoneg = AUTONEG_DISABLE;
4491         }
4492
4493         if (netif_carrier_ok(dev)) {
4494                 cmd->speed = bp->line_speed;
4495                 cmd->duplex = bp->duplex;
4496         }
4497         else {
4498                 cmd->speed = -1;
4499                 cmd->duplex = -1;
4500         }
4501
4502         cmd->transceiver = XCVR_INTERNAL;
4503         cmd->phy_address = bp->phy_addr;
4504
4505         return 0;
4506 }
4507   
/* ethtool set_settings: validate and apply speed/duplex/autoneg
 * configuration.  Called with rtnl_lock held.  New values are staged
 * in locals so nothing is committed if validation fails.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise every speed the PHY
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: SerDes accepts only 1000-full; copper
		 * accepts anything except forced 1000.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	/* Commit the new settings to the PHY. */
	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4579
4580 static void
4581 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4582 {
4583         struct bnx2 *bp = netdev_priv(dev);
4584
4585         strcpy(info->driver, DRV_MODULE_NAME);
4586         strcpy(info->version, DRV_MODULE_VERSION);
4587         strcpy(info->bus_info, pci_name(bp->pdev));
4588         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4589         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4590         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4591         info->fw_version[1] = info->fw_version[3] = '.';
4592         info->fw_version[5] = 0;
4593 }
4594
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: buffer size the core must allocate. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4602
4603 static void
4604 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4605 {
4606         u32 *p = _p, i, offset;
4607         u8 *orig_p = _p;
4608         struct bnx2 *bp = netdev_priv(dev);
4609         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4610                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4611                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4612                                  0x1040, 0x1048, 0x1080, 0x10a4,
4613                                  0x1400, 0x1490, 0x1498, 0x14f0,
4614                                  0x1500, 0x155c, 0x1580, 0x15dc,
4615                                  0x1600, 0x1658, 0x1680, 0x16d8,
4616                                  0x1800, 0x1820, 0x1840, 0x1854,
4617                                  0x1880, 0x1894, 0x1900, 0x1984,
4618                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4619                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4620                                  0x2000, 0x2030, 0x23c0, 0x2400,
4621                                  0x2800, 0x2820, 0x2830, 0x2850,
4622                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4623                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4624                                  0x4080, 0x4090, 0x43c0, 0x4458,
4625                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4626                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4627                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4628                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4629                                  0x6800, 0x6848, 0x684c, 0x6860,
4630                                  0x6888, 0x6910, 0x8000 };
4631
4632         regs->version = 0;
4633
4634         memset(p, 0, BNX2_REGDUMP_LEN);
4635
4636         if (!netif_running(bp->dev))
4637                 return;
4638
4639         i = 0;
4640         offset = reg_boundaries[0];
4641         p += offset;
4642         while (offset < BNX2_REGDUMP_LEN) {
4643                 *p++ = REG_RD(bp, offset);
4644                 offset += 4;
4645                 if (offset == reg_boundaries[i + 1]) {
4646                         offset = reg_boundaries[i + 2];
4647                         p = (u32 *) (orig_p + offset);
4648                         i += 2;
4649                 }
4650         }
4651 }
4652
4653 static void
4654 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4655 {
4656         struct bnx2 *bp = netdev_priv(dev);
4657
4658         if (bp->flags & NO_WOL_FLAG) {
4659                 wol->supported = 0;
4660                 wol->wolopts = 0;
4661         }
4662         else {
4663                 wol->supported = WAKE_MAGIC;
4664                 if (bp->wol)
4665                         wol->wolopts = WAKE_MAGIC;
4666                 else
4667                         wol->wolopts = 0;
4668         }
4669         memset(&wol->sopass, 0, sizeof(wol->sopass));
4670 }
4671
4672 static int
4673 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4674 {
4675         struct bnx2 *bp = netdev_priv(dev);
4676
4677         if (wol->wolopts & ~WAKE_MAGIC)
4678                 return -EINVAL;
4679
4680         if (wol->wolopts & WAKE_MAGIC) {
4681                 if (bp->flags & NO_WOL_FLAG)
4682                         return -EINVAL;
4683
4684                 bp->wol = 1;
4685         }
4686         else {
4687                 bp->wol = 0;
4688         }
4689         return 0;
4690 }
4691
/* ethtool nway_reset: restart link autonegotiation.  Called with
 * rtnl_lock held; only valid when autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock so we can sleep while the remote end
		 * notices the link drop.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Arm the driver timer to follow up on SerDes
			 * autoneg (5706 only).
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4727
4728 static int
4729 bnx2_get_eeprom_len(struct net_device *dev)
4730 {
4731         struct bnx2 *bp = netdev_priv(dev);
4732
4733         if (bp->flash_info == NULL)
4734                 return 0;
4735
4736         return (int) bp->flash_size;
4737 }
4738
4739 static int
4740 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4741                 u8 *eebuf)
4742 {
4743         struct bnx2 *bp = netdev_priv(dev);
4744         int rc;
4745
4746         /* parameters already validated in ethtool_get_eeprom */
4747
4748         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4749
4750         return rc;
4751 }
4752
4753 static int
4754 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4755                 u8 *eebuf)
4756 {
4757         struct bnx2 *bp = netdev_priv(dev);
4758         int rc;
4759
4760         /* parameters already validated in ethtool_set_eeprom */
4761
4762         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4763
4764         return rc;
4765 }
4766
4767 static int
4768 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4769 {
4770         struct bnx2 *bp = netdev_priv(dev);
4771
4772         memset(coal, 0, sizeof(struct ethtool_coalesce));
4773
4774         coal->rx_coalesce_usecs = bp->rx_ticks;
4775         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4776         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4777         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4778
4779         coal->tx_coalesce_usecs = bp->tx_ticks;
4780         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4781         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4782         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4783
4784         coal->stats_block_coalesce_usecs = bp->stats_ticks;
4785
4786         return 0;
4787 }
4788
4789 static int
4790 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4791 {
4792         struct bnx2 *bp = netdev_priv(dev);
4793
4794         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4795         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4796
4797         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
4798         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4799
4800         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4801         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4802
4803         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4804         if (bp->rx_quick_cons_trip_int > 0xff)
4805                 bp->rx_quick_cons_trip_int = 0xff;
4806
4807         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4808         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4809
4810         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4811         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4812
4813         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4814         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4815
4816         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4817         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4818                 0xff;
4819
4820         bp->stats_ticks = coal->stats_block_coalesce_usecs;
4821         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4822         bp->stats_ticks &= 0xffff00;
4823
4824         if (netif_running(bp->dev)) {
4825                 bnx2_netif_stop(bp);
4826                 bnx2_init_nic(bp);
4827                 bnx2_netif_start(bp);
4828         }
4829
4830         return 0;
4831 }
4832
4833 static void
4834 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4835 {
4836         struct bnx2 *bp = netdev_priv(dev);
4837
4838         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4839         ering->rx_mini_max_pending = 0;
4840         ering->rx_jumbo_max_pending = 0;
4841
4842         ering->rx_pending = bp->rx_ring_size;
4843         ering->rx_mini_pending = 0;
4844         ering->rx_jumbo_pending = 0;
4845
4846         ering->tx_max_pending = MAX_TX_DESC_CNT;
4847         ering->tx_pending = bp->tx_ring_size;
4848 }
4849
/* ethtool set_ringparam: resize the rx and tx rings.  On a running
 * interface the rings are torn down and rebuilt with the new sizes.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* tx ring must exceed MAX_SKB_FRAGS so one maximally
	 * fragmented packet can always fit.
	 */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		/* NOTE(review): if this allocation fails, the old rings
		 * were already freed above and the device stays stopped;
		 * the caller only sees the error code.
		 */
		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
4883
4884 static void
4885 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4886 {
4887         struct bnx2 *bp = netdev_priv(dev);
4888
4889         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4890         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4891         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4892 }
4893
4894 static int
4895 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4896 {
4897         struct bnx2 *bp = netdev_priv(dev);
4898
4899         bp->req_flow_ctrl = 0;
4900         if (epause->rx_pause)
4901                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4902         if (epause->tx_pause)
4903                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4904
4905         if (epause->autoneg) {
4906                 bp->autoneg |= AUTONEG_FLOW_CTRL;
4907         }
4908         else {
4909                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4910         }
4911
4912         spin_lock_bh(&bp->phy_lock);
4913
4914         bnx2_setup_phy(bp);
4915
4916         spin_unlock_bh(&bp->phy_lock);
4917
4918         return 0;
4919 }
4920
4921 static u32
4922 bnx2_get_rx_csum(struct net_device *dev)
4923 {
4924         struct bnx2 *bp = netdev_priv(dev);
4925
4926         return bp->rx_csum;
4927 }
4928
4929 static int
4930 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4931 {
4932         struct bnx2 *bp = netdev_priv(dev);
4933
4934         bp->rx_csum = data;
4935         return 0;
4936 }
4937
#define BNX2_NUM_STATS 45

/* ethtool statistics names; order must stay in sync with
 * bnx2_stats_offset_arr and the per-chip stats length arrays.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
};
4989
/* 32-bit word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offsets of the counters backing bnx2_stats_str_arr, in the same
 * order; 64-bit counters point at their _hi word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
};
5039
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter sizes, one entry per bnx2_stats_str_arr name; values
 * appear to be byte widths (8 or 4) with 0 marking the counters
 * skipped per the errata note above — TODO confirm against the
 * consumer (bnx2_get_ethtool_stats).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};

/* 5708 variant: only entry 1 (stat_IfHCInBadOctets) is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};
5058
#define BNX2_NUM_TESTS 6

/* Self-test names, in the order bnx2_self_test() fills buf[]. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5071
/* ethtool self_test_count: number of results bnx2_self_test() reports. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5077
/* ethtool self_test: run diagnostics, storing per-test results in
 * buf[] (non-zero = failed).  Offline tests reset the chip and only
 * run when ETH_TEST_FL_OFFLINE is requested.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Offline tests need exclusive access to the chip. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation if the interface is up. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		/* Copper links can take longer to renegotiate. */
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5129
5130 static void
5131 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5132 {
5133         switch (stringset) {
5134         case ETH_SS_STATS:
5135                 memcpy(buf, bnx2_stats_str_arr,
5136                         sizeof(bnx2_stats_str_arr));
5137                 break;
5138         case ETH_SS_TEST:
5139                 memcpy(buf, bnx2_tests_str_arr,
5140                         sizeof(bnx2_tests_str_arr));
5141                 break;
5142         }
5143 }
5144
/* ethtool hook: number of statistics reported by get_ethtool_stats. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
        return BNX2_NUM_STATS;
}
5150
5151 static void
5152 bnx2_get_ethtool_stats(struct net_device *dev,
5153                 struct ethtool_stats *stats, u64 *buf)
5154 {
5155         struct bnx2 *bp = netdev_priv(dev);
5156         int i;
5157         u32 *hw_stats = (u32 *) bp->stats_blk;
5158         u8 *stats_len_arr = NULL;
5159
5160         if (hw_stats == NULL) {
5161                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5162                 return;
5163         }
5164
5165         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5166             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5167             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5168             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5169                 stats_len_arr = bnx2_5706_stats_len_arr;
5170         else
5171                 stats_len_arr = bnx2_5708_stats_len_arr;
5172
5173         for (i = 0; i < BNX2_NUM_STATS; i++) {
5174                 if (stats_len_arr[i] == 0) {
5175                         /* skip this counter */
5176                         buf[i] = 0;
5177                         continue;
5178                 }
5179                 if (stats_len_arr[i] == 4) {
5180                         /* 4-byte counter */
5181                         buf[i] = (u64)
5182                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5183                         continue;
5184                 }
5185                 /* 8-byte counter */
5186                 buf[i] = (((u64) *(hw_stats +
5187                                         bnx2_stats_offset_arr[i])) << 32) +
5188                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5189         }
5190 }
5191
5192 static int
5193 bnx2_phys_id(struct net_device *dev, u32 data)
5194 {
5195         struct bnx2 *bp = netdev_priv(dev);
5196         int i;
5197         u32 save;
5198
5199         if (data == 0)
5200                 data = 2;
5201
5202         save = REG_RD(bp, BNX2_MISC_CFG);
5203         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5204
5205         for (i = 0; i < (data * 2); i++) {
5206                 if ((i % 2) == 0) {
5207                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5208                 }
5209                 else {
5210                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5211                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5212                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5213                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5214                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5215                                 BNX2_EMAC_LED_TRAFFIC);
5216                 }
5217                 msleep_interruptible(500);
5218                 if (signal_pending(current))
5219                         break;
5220         }
5221         REG_WR(bp, BNX2_EMAC_LED, 0);
5222         REG_WR(bp, BNX2_MISC_CFG, save);
5223         return 0;
5224 }
5225
/* ethtool operations table; hooked up to the net_device in
 * bnx2_init_one.  TSO entries are compiled in only when BCM_TSO is set.
 */
static struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .get_tx_csum            = ethtool_op_get_tx_csum,
        .set_tx_csum            = ethtool_op_set_tx_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
#ifdef BCM_TSO
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = ethtool_op_set_tso,
#endif
        .self_test_count        = bnx2_self_test_count,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_stats_count        = bnx2_get_stats_count,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_perm_addr          = ethtool_op_get_perm_addr,
};
5263
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * NOTE(review): PHY registers are accessed here even when the interface
 * is down — presumably the chip remains accessible in that state;
 * confirm against bnx2_set_power_state behavior.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                /* phy_lock serializes all PHY register access. */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                /* Writing PHY registers can reconfigure the link;
                 * restrict to CAP_NET_ADMIN.
                 */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
5305
5306 /* Called with rtnl_lock */
5307 static int
5308 bnx2_change_mac_addr(struct net_device *dev, void *p)
5309 {
5310         struct sockaddr *addr = p;
5311         struct bnx2 *bp = netdev_priv(dev);
5312
5313         if (!is_valid_ether_addr(addr->sa_data))
5314                 return -EINVAL;
5315
5316         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5317         if (netif_running(dev))
5318                 bnx2_set_mac_addr(bp);
5319
5320         return 0;
5321 }
5322
5323 /* Called with rtnl_lock */
5324 static int
5325 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5326 {
5327         struct bnx2 *bp = netdev_priv(dev);
5328
5329         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5330                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5331                 return -EINVAL;
5332
5333         dev->mtu = new_mtu;
5334         if (netif_running(dev)) {
5335                 bnx2_netif_stop(bp);
5336
5337                 bnx2_init_nic(bp);
5338
5339                 bnx2_netif_start(bp);
5340         }
5341         return 0;
5342 }
5343
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* poll_controller hook: invoke the interrupt handler directly with the
 * device IRQ disabled, for contexts that cannot rely on interrupt
 * delivery (e.g. netpoll).
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2_interrupt(bp->pdev->irq, dev, NULL);
        enable_irq(bp->pdev->irq);
}
#endif
5355
5356 static int __devinit
5357 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5358 {
5359         struct bnx2 *bp;
5360         unsigned long mem_len;
5361         int rc;
5362         u32 reg;
5363
5364         SET_MODULE_OWNER(dev);
5365         SET_NETDEV_DEV(dev, &pdev->dev);
5366         bp = netdev_priv(dev);
5367
5368         bp->flags = 0;
5369         bp->phy_flags = 0;
5370
5371         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5372         rc = pci_enable_device(pdev);
5373         if (rc) {
5374                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5375                 goto err_out;
5376         }
5377
5378         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5379                 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5380                        "aborting.\n");
5381                 rc = -ENODEV;
5382                 goto err_out_disable;
5383         }
5384
5385         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5386         if (rc) {
5387                 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5388                 goto err_out_disable;
5389         }
5390
5391         pci_set_master(pdev);
5392
5393         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5394         if (bp->pm_cap == 0) {
5395                 printk(KERN_ERR PFX "Cannot find power management capability, "
5396                                "aborting.\n");
5397                 rc = -EIO;
5398                 goto err_out_release;
5399         }
5400
5401         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5402         if (bp->pcix_cap == 0) {
5403                 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5404                 rc = -EIO;
5405                 goto err_out_release;
5406         }
5407
5408         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5409                 bp->flags |= USING_DAC_FLAG;
5410                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5411                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5412                                "failed, aborting.\n");
5413                         rc = -EIO;
5414                         goto err_out_release;
5415                 }
5416         }
5417         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5418                 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5419                 rc = -EIO;
5420                 goto err_out_release;
5421         }
5422
5423         bp->dev = dev;
5424         bp->pdev = pdev;
5425
5426         spin_lock_init(&bp->phy_lock);
5427         spin_lock_init(&bp->tx_lock);
5428         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5429
5430         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5431         mem_len = MB_GET_CID_ADDR(17);
5432         dev->mem_end = dev->mem_start + mem_len;
5433         dev->irq = pdev->irq;
5434
5435         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5436
5437         if (!bp->regview) {
5438                 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5439                 rc = -ENOMEM;
5440                 goto err_out_release;
5441         }
5442
5443         /* Configure byte swap and enable write to the reg_window registers.
5444          * Rely on CPU to do target byte swapping on big endian systems
5445          * The chip's target access swapping will not swap all accesses
5446          */
5447         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5448                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5449                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5450
5451         bnx2_set_power_state(bp, PCI_D0);
5452
5453         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5454
5455         /* Get bus information. */
5456         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5457         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5458                 u32 clkreg;
5459
5460                 bp->flags |= PCIX_FLAG;
5461
5462                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5463                 
5464                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5465                 switch (clkreg) {
5466                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5467                         bp->bus_speed_mhz = 133;
5468                         break;
5469
5470                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5471                         bp->bus_speed_mhz = 100;
5472                         break;
5473
5474                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5475                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5476                         bp->bus_speed_mhz = 66;
5477                         break;
5478
5479                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5480                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5481                         bp->bus_speed_mhz = 50;
5482                         break;
5483
5484                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5485                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5486                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5487                         bp->bus_speed_mhz = 33;
5488                         break;
5489                 }
5490         }
5491         else {
5492                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5493                         bp->bus_speed_mhz = 66;
5494                 else
5495                         bp->bus_speed_mhz = 33;
5496         }
5497
5498         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5499                 bp->flags |= PCI_32BIT_FLAG;
5500
5501         /* 5706A0 may falsely detect SERR and PERR. */
5502         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5503                 reg = REG_RD(bp, PCI_COMMAND);
5504                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5505                 REG_WR(bp, PCI_COMMAND, reg);
5506         }
5507         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5508                 !(bp->flags & PCIX_FLAG)) {
5509
5510                 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5511                        "aborting.\n");
5512                 goto err_out_unmap;
5513         }
5514
5515         bnx2_init_nvram(bp);
5516
5517         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5518
5519         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5520             BNX2_SHM_HDR_SIGNATURE_SIG)
5521                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5522         else
5523                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5524
5525         /* Get the permanent MAC address.  First we need to make sure the
5526          * firmware is actually running.
5527          */
5528         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5529
5530         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5531             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5532                 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5533                 rc = -ENODEV;
5534                 goto err_out_unmap;
5535         }
5536
5537         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5538
5539         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5540         bp->mac_addr[0] = (u8) (reg >> 8);
5541         bp->mac_addr[1] = (u8) reg;
5542
5543         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5544         bp->mac_addr[2] = (u8) (reg >> 24);
5545         bp->mac_addr[3] = (u8) (reg >> 16);
5546         bp->mac_addr[4] = (u8) (reg >> 8);
5547         bp->mac_addr[5] = (u8) reg;
5548
5549         bp->tx_ring_size = MAX_TX_DESC_CNT;
5550         bnx2_set_rx_ring_size(bp, 100);
5551
5552         bp->rx_csum = 1;
5553
5554         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5555
5556         bp->tx_quick_cons_trip_int = 20;
5557         bp->tx_quick_cons_trip = 20;
5558         bp->tx_ticks_int = 80;
5559         bp->tx_ticks = 80;
5560                 
5561         bp->rx_quick_cons_trip_int = 6;
5562         bp->rx_quick_cons_trip = 6;
5563         bp->rx_ticks_int = 18;
5564         bp->rx_ticks = 18;
5565
5566         bp->stats_ticks = 1000000 & 0xffff00;
5567
5568         bp->timer_interval =  HZ;
5569         bp->current_interval =  HZ;
5570
5571         bp->phy_addr = 1;
5572
5573         /* Disable WOL support if we are running on a SERDES chip. */
5574         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5575                 bp->phy_flags |= PHY_SERDES_FLAG;
5576                 bp->flags |= NO_WOL_FLAG;
5577                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5578                         bp->phy_addr = 2;
5579                         reg = REG_RD_IND(bp, bp->shmem_base +
5580                                          BNX2_SHARED_HW_CFG_CONFIG);
5581                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5582                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5583                 }
5584         }
5585
5586         if (CHIP_NUM(bp) == CHIP_NUM_5708)
5587                 bp->flags |= NO_WOL_FLAG;
5588
5589         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5590                 bp->tx_quick_cons_trip_int =
5591                         bp->tx_quick_cons_trip;
5592                 bp->tx_ticks_int = bp->tx_ticks;
5593                 bp->rx_quick_cons_trip_int =
5594                         bp->rx_quick_cons_trip;
5595                 bp->rx_ticks_int = bp->rx_ticks;
5596                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5597                 bp->com_ticks_int = bp->com_ticks;
5598                 bp->cmd_ticks_int = bp->cmd_ticks;
5599         }
5600
5601         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5602         bp->req_line_speed = 0;
5603         if (bp->phy_flags & PHY_SERDES_FLAG) {
5604                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5605
5606                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5607                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5608                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5609                         bp->autoneg = 0;
5610                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5611                         bp->req_duplex = DUPLEX_FULL;
5612                 }
5613         }
5614         else {
5615                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5616         }
5617
5618         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5619
5620         init_timer(&bp->timer);
5621         bp->timer.expires = RUN_AT(bp->timer_interval);
5622         bp->timer.data = (unsigned long) bp;
5623         bp->timer.function = bnx2_timer;
5624
5625         return 0;
5626
5627 err_out_unmap:
5628         if (bp->regview) {
5629                 iounmap(bp->regview);
5630                 bp->regview = NULL;
5631         }
5632
5633 err_out_release:
5634         pci_release_regions(pdev);
5635
5636 err_out_disable:
5637         pci_disable_device(pdev);
5638         pci_set_drvdata(pdev, NULL);
5639
5640 err_out:
5641         return rc;
5642 }
5643
5644 static int __devinit
5645 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5646 {
5647         static int version_printed = 0;
5648         struct net_device *dev = NULL;
5649         struct bnx2 *bp;
5650         int rc, i;
5651
5652         if (version_printed++ == 0)
5653                 printk(KERN_INFO "%s", version);
5654
5655         /* dev zeroed in init_etherdev */
5656         dev = alloc_etherdev(sizeof(*bp));
5657
5658         if (!dev)
5659                 return -ENOMEM;
5660
5661         rc = bnx2_init_board(pdev, dev);
5662         if (rc < 0) {
5663                 free_netdev(dev);
5664                 return rc;
5665         }
5666
5667         dev->open = bnx2_open;
5668         dev->hard_start_xmit = bnx2_start_xmit;
5669         dev->stop = bnx2_close;
5670         dev->get_stats = bnx2_get_stats;
5671         dev->set_multicast_list = bnx2_set_rx_mode;
5672         dev->do_ioctl = bnx2_ioctl;
5673         dev->set_mac_address = bnx2_change_mac_addr;
5674         dev->change_mtu = bnx2_change_mtu;
5675         dev->tx_timeout = bnx2_tx_timeout;
5676         dev->watchdog_timeo = TX_TIMEOUT;
5677 #ifdef BCM_VLAN
5678         dev->vlan_rx_register = bnx2_vlan_rx_register;
5679         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5680 #endif
5681         dev->poll = bnx2_poll;
5682         dev->ethtool_ops = &bnx2_ethtool_ops;
5683         dev->weight = 64;
5684
5685         bp = netdev_priv(dev);
5686
5687 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5688         dev->poll_controller = poll_bnx2;
5689 #endif
5690
5691         if ((rc = register_netdev(dev))) {
5692                 printk(KERN_ERR PFX "Cannot register net device\n");
5693                 if (bp->regview)
5694                         iounmap(bp->regview);
5695                 pci_release_regions(pdev);
5696                 pci_disable_device(pdev);
5697                 pci_set_drvdata(pdev, NULL);
5698                 free_netdev(dev);
5699                 return rc;
5700         }
5701
5702         pci_set_drvdata(pdev, dev);
5703
5704         memcpy(dev->dev_addr, bp->mac_addr, 6);
5705         memcpy(dev->perm_addr, bp->mac_addr, 6);
5706         bp->name = board_info[ent->driver_data].name,
5707         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5708                 "IRQ %d, ",
5709                 dev->name,
5710                 bp->name,
5711                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5712                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5713                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5714                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5715                 bp->bus_speed_mhz,
5716                 dev->base_addr,
5717                 bp->pdev->irq);
5718
5719         printk("node addr ");
5720         for (i = 0; i < 6; i++)
5721                 printk("%2.2x", dev->dev_addr[i]);
5722         printk("\n");
5723
5724         dev->features |= NETIF_F_SG;
5725         if (bp->flags & USING_DAC_FLAG)
5726                 dev->features |= NETIF_F_HIGHDMA;
5727         dev->features |= NETIF_F_IP_CSUM;
5728 #ifdef BCM_VLAN
5729         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5730 #endif
5731 #ifdef BCM_TSO
5732         dev->features |= NETIF_F_TSO;
5733 #endif
5734
5735         netif_carrier_off(bp->dev);
5736
5737         return 0;
5738 }
5739
/* PCI remove: tear down in reverse order of bnx2_init_one. */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure no queued reset_task runs after the device is gone. */
        flush_scheduled_work();

        unregister_netdev(dev);

        if (bp->regview)
                iounmap(bp->regview);

        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
5758
/* PM suspend: quiesce the NIC and hand it to firmware with a reset code
 * that reflects the Wake-on-LAN configuration, then drop to the
 * requested PCI power state.  A down interface needs no work.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        if (!netif_running(dev))
                return 0;

        /* Finish any pending reset_task before stopping the interface. */
        flush_scheduled_work();
        bnx2_netif_stop(bp);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        /* Pick the firmware reset code: plain unload when WOL is not
         * supported, otherwise suspend with or without WOL armed.
         */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_skbs(bp);
        bnx2_set_power_state(bp, pci_choose_state(pdev, state));
        return 0;
}
5784
/* PM resume: restore full power and re-initialize the NIC if the
 * interface was running when we suspended.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        if (!netif_running(dev))
                return 0;

        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp);
        bnx2_netif_start(bp);
        return 0;
}
5800
/* PCI driver glue; bnx2_pci_tbl (defined earlier in this file) lists
 * the supported device IDs.
 */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
};
5809
/* Module load: register the PCI driver. */
static int __init bnx2_init(void)
{
        return pci_module_init(&bnx2_pci_driver);
}

/* Module unload: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
5822
5823
5824